From 1f7f8afdd7d3942e2fae10954982c1b21eb5b454 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Wed, 3 Oct 2018 17:37:55 +0200 Subject: [PATCH 01/15] Fixed tests for NVIDIA, where it asserted with OUT_OF_MEMORY, probably due to higher alignment requirements. --- src/Tests.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Tests.cpp b/src/Tests.cpp index 30b8bf9..6dc236a 100644 --- a/src/Tests.cpp +++ b/src/Tests.cpp @@ -2199,9 +2199,9 @@ static void BenchmarkAlgorithmsCase(FILE* file, if(!empty) { - // Make allocations up to half of pool size. + // Make allocations up to 1/3 of pool size. VkDeviceSize totalSize = 0; - while(totalSize < poolCreateInfo.blockSize / 2) + while(totalSize < poolCreateInfo.blockSize / 3) { memReq.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin); res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr); @@ -2221,7 +2221,7 @@ static void BenchmarkAlgorithmsCase(FILE* file, } // BENCHMARK - const size_t allocCount = maxBufCapacity / 2; + const size_t allocCount = maxBufCapacity / 3; std::vector testAllocations; testAllocations.reserve(allocCount); duration allocTotalDuration = duration::zero(); @@ -2367,7 +2367,7 @@ static void BenchmarkAlgorithms(FILE* file) BenchmarkAlgorithmsCase( file, algorithm, - emptyIndex ? 0 : 1, // empty + (emptyIndex == 0), // empty strategy, freeOrder); // freeOrder } From b8d34d5e6a1c0f8be737cce5df5be413267c9ea5 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Wed, 3 Oct 2018 17:41:20 +0200 Subject: [PATCH 02/15] Replaced assert() with new macro TEST() in all tests, to check conditions also in Release configuration. 
# Conflicts: # src/SparseBindingTest.cpp # src/Tests.cpp # src/VulkanSample.cpp --- src/Common.h | 17 +- src/Tests.cpp | 368 +++++++++++++++++++++---------------------- src/VulkanSample.cpp | 9 +- 3 files changed, 208 insertions(+), 186 deletions(-) diff --git a/src/Common.h b/src/Common.h index 111ccde..0e32c78 100644 --- a/src/Common.h +++ b/src/Common.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -25,7 +26,21 @@ typedef std::chrono::high_resolution_clock::time_point time_point; typedef std::chrono::high_resolution_clock::duration duration; -#define ERR_GUARD_VULKAN(Expr) do { VkResult res__ = (Expr); if (res__ < 0) assert(0); } while(0) +#ifdef _DEBUG + #define TEST(expr) do { \ + if(!(expr)) { \ + assert(0 && #expr); \ + } \ + } while(0) +#else + #define TEST(expr) do { \ + if(!(expr)) { \ + throw std::runtime_error("TEST FAILED: " #expr); \ + } \ + } while(0) +#endif + +#define ERR_GUARD_VULKAN(expr) TEST((expr) >= 0) extern VkPhysicalDevice g_hPhysicalDevice; extern VkDevice g_hDevice; diff --git a/src/Tests.cpp b/src/Tests.cpp index 6dc236a..ebd7366 100644 --- a/src/Tests.cpp +++ b/src/Tests.cpp @@ -401,7 +401,7 @@ VkResult MainTest(Result& outResult, const Config& config) } else { - assert(0); + TEST(0); } return res; }; @@ -684,14 +684,14 @@ static void CreateBuffer( { outAllocInfo.m_StartValue = (uint32_t)rand(); uint32_t* data = (uint32_t*)vmaAllocInfo.pMappedData; - assert((data != nullptr) == persistentlyMapped); + TEST((data != nullptr) == persistentlyMapped); if(!persistentlyMapped) { ERR_GUARD_VULKAN( vmaMapMemory(g_hAllocator, outAllocInfo.m_Allocation, (void**)&data) ); } uint32_t value = outAllocInfo.m_StartValue; - assert(bufCreateInfo.size % 4 == 0); + TEST(bufCreateInfo.size % 4 == 0); for(size_t i = 0; i < bufCreateInfo.size / sizeof(uint32_t); ++i) data[i] = value++; @@ -726,7 +726,7 @@ static void CreateAllocation(AllocInfo& outAllocation, VmaAllocator allocator) VkResult res = vmaCreateBuffer(allocator, 
&bufferInfo, &vmaMemReq, &outAllocation.m_Buffer, &outAllocation.m_Allocation, &allocInfo); outAllocation.m_BufferInfo = bufferInfo; - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } else { @@ -752,18 +752,18 @@ static void CreateAllocation(AllocInfo& outAllocation, VmaAllocator allocator) VkResult res = vmaCreateImage(allocator, &imageInfo, &vmaMemReq, &outAllocation.m_Image, &outAllocation.m_Allocation, &allocInfo); outAllocation.m_ImageInfo = imageInfo; - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } uint32_t* data = (uint32_t*)allocInfo.pMappedData; if(allocInfo.pMappedData == nullptr) { VkResult res = vmaMapMemory(allocator, outAllocation.m_Allocation, (void**)&data); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } uint32_t value = outAllocation.m_StartValue; - assert(allocInfo.size % 4 == 0); + TEST(allocInfo.size % 4 == 0); for(size_t i = 0; i < allocInfo.size / sizeof(uint32_t); ++i) data[i] = value++; @@ -795,13 +795,13 @@ static void ValidateAllocationData(const AllocInfo& allocation) if(allocInfo.pMappedData == nullptr) { VkResult res = vmaMapMemory(g_hAllocator, allocation.m_Allocation, (void**)&data); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } uint32_t value = allocation.m_StartValue; bool ok = true; size_t i; - assert(allocInfo.size % 4 == 0); + TEST(allocInfo.size % 4 == 0); for(i = 0; i < allocInfo.size / sizeof(uint32_t); ++i) { if(data[i] != value++) @@ -810,7 +810,7 @@ static void ValidateAllocationData(const AllocInfo& allocation) break; } } - assert(ok); + TEST(ok); if(allocInfo.pMappedData == nullptr) vmaUnmapMemory(g_hAllocator, allocation.m_Allocation); @@ -826,29 +826,29 @@ static void RecreateAllocationResource(AllocInfo& allocation) vkDestroyBuffer(g_hDevice, allocation.m_Buffer, nullptr); VkResult res = vkCreateBuffer(g_hDevice, &allocation.m_BufferInfo, nullptr, &allocation.m_Buffer); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Just to silence validation layer warnings. 
VkMemoryRequirements vkMemReq; vkGetBufferMemoryRequirements(g_hDevice, allocation.m_Buffer, &vkMemReq); - assert(vkMemReq.size == allocation.m_BufferInfo.size); + TEST(vkMemReq.size == allocation.m_BufferInfo.size); res = vkBindBufferMemory(g_hDevice, allocation.m_Buffer, allocInfo.deviceMemory, allocInfo.offset); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } else { vkDestroyImage(g_hDevice, allocation.m_Image, nullptr); VkResult res = vkCreateImage(g_hDevice, &allocation.m_ImageInfo, nullptr, &allocation.m_Image); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Just to silence validation layer warnings. VkMemoryRequirements vkMemReq; vkGetImageMemoryRequirements(g_hDevice, allocation.m_Image, &vkMemReq); res = vkBindImageMemory(g_hDevice, allocation.m_Image, allocInfo.deviceMemory, allocInfo.offset); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } } @@ -942,8 +942,8 @@ void TestDefragmentationSimple() VmaDefragmentationStats defragStats; Defragment(allocations.data(), allocations.size(), nullptr, &defragStats); - assert(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0); - assert(defragStats.deviceMemoryBlocksFreed >= 1); + TEST(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0); + TEST(defragStats.deviceMemoryBlocksFreed >= 1); ValidateAllocationsData(allocations.data(), allocations.size()); @@ -976,7 +976,7 @@ void TestDefragmentationSimple() { VmaDefragmentationStats defragStats; Defragment(allocations.data(), allocations.size(), &defragInfo, &defragStats); - assert(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0); + TEST(defragStats.allocationsMoved > 0 && defragStats.bytesMoved > 0); } ValidateAllocationsData(allocations.data(), allocations.size()); @@ -1089,7 +1089,7 @@ void TestDefragmentationFull() VmaDefragmentationStats stats; VkResult res = vmaDefragment(g_hAllocator, vmaAllocations.data(), vmaAllocations.size(), allocationsChanged.data(), &defragmentationInfo, &stats); - 
assert(res >= 0); + TEST(res >= 0); float defragmentDuration = ToFloatSeconds(std::chrono::high_resolution_clock::now() - begTime); @@ -1142,15 +1142,15 @@ static void TestUserData() VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); - assert(allocInfo.pUserData = numberAsPointer); + TEST(res == VK_SUCCESS); + TEST(allocInfo.pUserData = numberAsPointer); vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(allocInfo.pUserData == numberAsPointer); + TEST(allocInfo.pUserData == numberAsPointer); vmaSetAllocationUserData(g_hAllocator, alloc, pointerToSomething); vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(allocInfo.pUserData == pointerToSomething); + TEST(allocInfo.pUserData == pointerToSomething); vmaDestroyBuffer(g_hAllocator, buf, alloc); } @@ -1173,22 +1173,22 @@ static void TestUserData() VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); - assert(allocInfo.pUserData != nullptr && allocInfo.pUserData != name1Buf); - assert(strcmp(name1, (const char*)allocInfo.pUserData) == 0); + TEST(res == VK_SUCCESS); + TEST(allocInfo.pUserData != nullptr && allocInfo.pUserData != name1Buf); + TEST(strcmp(name1, (const char*)allocInfo.pUserData) == 0); delete[] name1Buf; vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(strcmp(name1, (const char*)allocInfo.pUserData) == 0); + TEST(strcmp(name1, (const char*)allocInfo.pUserData) == 0); vmaSetAllocationUserData(g_hAllocator, alloc, (void*)name2); vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(strcmp(name2, (const char*)allocInfo.pUserData) == 0); + TEST(strcmp(name2, (const char*)allocInfo.pUserData) == 0); vmaSetAllocationUserData(g_hAllocator, alloc, nullptr); vmaGetAllocationInfo(g_hAllocator, 
alloc, &allocInfo); - assert(allocInfo.pUserData == nullptr); + TEST(allocInfo.pUserData == nullptr); vmaDestroyBuffer(g_hAllocator, buf, alloc); } @@ -1213,7 +1213,7 @@ static void TestMemoryRequirements() // No requirements. res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); vmaDestroyBuffer(g_hAllocator, buf, alloc); // Usage. @@ -1223,8 +1223,8 @@ static void TestMemoryRequirements() allocCreateInfo.memoryTypeBits = UINT32_MAX; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); - assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); + TEST(res == VK_SUCCESS); + TEST(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); vmaDestroyBuffer(g_hAllocator, buf, alloc); // Required flags, preferred flags. @@ -1234,9 +1234,9 @@ static void TestMemoryRequirements() allocCreateInfo.memoryTypeBits = 0; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); - assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); - assert(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + TEST(res == VK_SUCCESS); + TEST(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); + TEST(memProps->memoryTypes[allocInfo.memoryType].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); vmaDestroyBuffer(g_hAllocator, buf, alloc); // memoryTypeBits. 
@@ -1247,8 +1247,8 @@ static void TestMemoryRequirements() allocCreateInfo.memoryTypeBits = 1u << memType; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); - assert(allocInfo.memoryType == memType); + TEST(res == VK_SUCCESS); + TEST(allocInfo.memoryType == memType); vmaDestroyBuffer(g_hAllocator, buf, alloc); } @@ -1263,12 +1263,12 @@ static void TestBasics() { VmaAllocation alloc = VK_NULL_HANDLE; vmaCreateLostAllocation(g_hAllocator, &alloc); - assert(alloc != VK_NULL_HANDLE); + TEST(alloc != VK_NULL_HANDLE); VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, alloc, &allocInfo); - assert(allocInfo.deviceMemory == VK_NULL_HANDLE); - assert(allocInfo.size == 0); + TEST(allocInfo.deviceMemory == VK_NULL_HANDLE); + TEST(allocInfo.size == 0); vmaFreeMemory(g_hAllocator, alloc); } @@ -1285,7 +1285,7 @@ static void TestBasics() VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); vmaDestroyBuffer(g_hAllocator, buf, alloc); @@ -1293,7 +1293,7 @@ static void TestBasics() allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); vmaDestroyBuffer(g_hAllocator, buf, alloc); } @@ -1319,7 +1319,7 @@ void TestHeapSizeLimit() VmaAllocator hAllocator; VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &hAllocator); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); struct Item { @@ -1344,7 +1344,7 @@ void TestHeapSizeLimit() { Item item; res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, &ownAllocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } } @@ -1356,7 +1356,7 @@ void 
TestHeapSizeLimit() VmaPool hPool; res = vmaCreatePool(hAllocator, &poolCreateInfo, &hPool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // 2. Allocate normal buffers from all the remaining memory. { @@ -1370,7 +1370,7 @@ void TestHeapSizeLimit() { Item item; res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &item.hBuf, &item.hAlloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } } @@ -1385,7 +1385,7 @@ void TestHeapSizeLimit() VkBuffer hBuf; VmaAllocation hAlloc; res = vmaCreateBuffer(hAllocator, &bufCreateInfo, &allocCreateInfo, &hBuf, &hAlloc, nullptr); - assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY); + TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY); } // Destroy everything. @@ -1424,14 +1424,14 @@ static void TestDebugMargin() allocCreateInfo.flags = (i == BUF_COUNT - 1) ? VMA_ALLOCATION_CREATE_MAPPED_BIT : 0; VkResult res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &buffers[i].Buffer, &buffers[i].Allocation, &allocInfo[i]); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Margin is preserved also at the beginning of a block. - assert(allocInfo[i].offset >= VMA_DEBUG_MARGIN); + TEST(allocInfo[i].offset >= VMA_DEBUG_MARGIN); if(i == BUF_COUNT - 1) { // Fill with data. - assert(allocInfo[i].pMappedData != nullptr); + TEST(allocInfo[i].pMappedData != nullptr); // Uncomment this "+ 1" to overwrite past end of allocation and check corruption detection. memset(allocInfo[i].pMappedData, 0xFF, bufInfo.size /* + 1 */); } @@ -1450,12 +1450,12 @@ static void TestDebugMargin() { if(allocInfo[i].deviceMemory == allocInfo[i - 1].deviceMemory) { - assert(allocInfo[i].offset >= allocInfo[i - 1].offset + VMA_DEBUG_MARGIN); + TEST(allocInfo[i].offset >= allocInfo[i - 1].offset + VMA_DEBUG_MARGIN); } } VkResult res = vmaCheckCorruption(g_hAllocator, UINT32_MAX); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Destroy all buffers. 
for(size_t i = BUF_COUNT; i--; ) @@ -1480,7 +1480,7 @@ static void TestLinearAllocator() VmaPoolCreateInfo poolCreateInfo = {}; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); poolCreateInfo.blockSize = 1024 * 300; poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; @@ -1488,7 +1488,7 @@ static void TestLinearAllocator() VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo; @@ -1514,8 +1514,8 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(i == 0 || allocInfo.offset > prevOffset); + TEST(res == VK_SUCCESS); + TEST(i == 0 || allocInfo.offset > prevOffset); bufInfo.push_back(newBufInfo); prevOffset = allocInfo.offset; bufSumSize += bufCreateInfo.size; @@ -1524,9 +1524,9 @@ static void TestLinearAllocator() // Validate pool stats. VmaPoolStats stats; vmaGetPoolStats(g_hAllocator, pool, &stats); - assert(stats.size == poolCreateInfo.blockSize); - assert(stats.unusedSize = poolCreateInfo.blockSize - bufSumSize); - assert(stats.allocationCount == bufInfo.size()); + TEST(stats.size == poolCreateInfo.blockSize); + TEST(stats.unusedSize = poolCreateInfo.blockSize - bufSumSize); + TEST(stats.allocationCount == bufInfo.size()); // Destroy the buffers in random order. 
while(!bufInfo.empty()) @@ -1547,8 +1547,8 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(i == 0 || allocInfo.offset > prevOffset); + TEST(res == VK_SUCCESS); + TEST(i == 0 || allocInfo.offset > prevOffset); bufInfo.push_back(newBufInfo); prevOffset = allocInfo.offset; } @@ -1568,8 +1568,8 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(i == 0 || allocInfo.offset > prevOffset); + TEST(res == VK_SUCCESS); + TEST(i == 0 || allocInfo.offset > prevOffset); bufInfo.push_back(newBufInfo); prevOffset = allocInfo.offset; } @@ -1592,8 +1592,8 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(i == 0 || allocInfo.offset > prevOffset); + TEST(res == VK_SUCCESS); + TEST(i == 0 || allocInfo.offset > prevOffset); bufInfo.push_back(newBufInfo); prevOffset = allocInfo.offset; } @@ -1614,7 +1614,7 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); } } @@ -1632,7 +1632,7 @@ static void TestLinearAllocator() } else { - assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY); + TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY); } ++debugIndex; } @@ -1663,18 +1663,18 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == 
VK_SUCCESS); + TEST(res == VK_SUCCESS); if(upperAddress) { - assert(allocInfo.offset < prevOffsetUpper); + TEST(allocInfo.offset < prevOffsetUpper); prevOffsetUpper = allocInfo.offset; } else { - assert(allocInfo.offset >= prevOffsetLower); + TEST(allocInfo.offset >= prevOffsetLower); prevOffsetLower = allocInfo.offset; } - assert(prevOffsetLower < prevOffsetUpper); + TEST(prevOffsetLower < prevOffsetUpper); bufInfo.push_back(newBufInfo); } @@ -1698,7 +1698,7 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); } @@ -1729,15 +1729,15 @@ static void TestLinearAllocator() { if(upperAddress) { - assert(allocInfo.offset < prevOffsetUpper); + TEST(allocInfo.offset < prevOffsetUpper); prevOffsetUpper = allocInfo.offset; } else { - assert(allocInfo.offset >= prevOffsetLower); + TEST(allocInfo.offset >= prevOffsetLower); prevOffsetLower = allocInfo.offset; } - assert(prevOffsetLower < prevOffsetUpper); + TEST(prevOffsetLower < prevOffsetUpper); bufInfo.push_back(newBufInfo); } } @@ -1763,7 +1763,7 @@ static void TestLinearAllocator() &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); if(res == VK_SUCCESS) { - assert(allocInfo.offset < prevOffsetUpper); + TEST(allocInfo.offset < prevOffsetUpper); prevOffsetUpper = allocInfo.offset; bufInfo.push_back(newBufInfo); } @@ -1834,13 +1834,13 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); firstNewOffset = allocInfo.offset; // Make sure at least one buffer from the beginning became lost. 
vmaGetAllocationInfo(g_hAllocator, bufInfo[0].Allocation, &allocInfo); - assert(allocInfo.deviceMemory == VK_NULL_HANDLE); + TEST(allocInfo.deviceMemory == VK_NULL_HANDLE); } // Allocate more buffers that CAN_MAKE_OTHER_LOST until we wrap-around with this. @@ -1854,7 +1854,7 @@ static void TestLinearAllocator() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); ++newCount; if(allocInfo.offset < firstNewOffset) @@ -1878,7 +1878,7 @@ static void TestLinearAllocator() size_t lostAllocCount = SIZE_MAX; vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostAllocCount); - assert(lostAllocCount > 0); + TEST(lostAllocCount > 0); size_t realLostAllocCount = 0; for(size_t i = 0; i < bufInfo.size(); ++i) @@ -1887,7 +1887,7 @@ static void TestLinearAllocator() if(allocInfo.deviceMemory == VK_NULL_HANDLE) ++realLostAllocCount; } - assert(realLostAllocCount == lostAllocCount); + TEST(realLostAllocCount == lostAllocCount); } // Destroy all the buffers in forward order. 
@@ -1915,11 +1915,11 @@ static void TestLinearAllocatorMultiBlock() VmaPoolCreateInfo poolCreateInfo = {}; poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo; @@ -1938,7 +1938,7 @@ static void TestLinearAllocatorMultiBlock() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); if(lastMem && allocInfo.deviceMemory != lastMem) { @@ -1947,12 +1947,12 @@ static void TestLinearAllocatorMultiBlock() lastMem = allocInfo.deviceMemory; } - assert(bufInfo.size() > 2); + TEST(bufInfo.size() > 2); // Make sure that pool has now two blocks. VmaPoolStats poolStats = {}; vmaGetPoolStats(g_hAllocator, pool, &poolStats); - assert(poolStats.blockCount == 2); + TEST(poolStats.blockCount == 2); // Destroy all the buffers in random order. while(!bufInfo.empty()) @@ -1965,7 +1965,7 @@ static void TestLinearAllocatorMultiBlock() // Make sure that pool has now at most one block. vmaGetPoolStats(g_hAllocator, pool, &poolStats); - assert(poolStats.blockCount <= 1); + TEST(poolStats.blockCount <= 1); } // Test stack. 
@@ -1977,7 +1977,7 @@ static void TestLinearAllocatorMultiBlock() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); if(lastMem && allocInfo.deviceMemory != lastMem) { @@ -1986,7 +1986,7 @@ static void TestLinearAllocatorMultiBlock() lastMem = allocInfo.deviceMemory; } - assert(bufInfo.size() > 2); + TEST(bufInfo.size() > 2); // Add few more buffers. for(uint32_t i = 0; i < 5; ++i) @@ -1994,14 +1994,14 @@ static void TestLinearAllocatorMultiBlock() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); } // Make sure that pool has now two blocks. VmaPoolStats poolStats = {}; vmaGetPoolStats(g_hAllocator, pool, &poolStats); - assert(poolStats.blockCount == 2); + TEST(poolStats.blockCount == 2); // Delete half of buffers, LIFO. for(size_t i = 0, countToDelete = bufInfo.size() / 2; i < countToDelete; ++i) @@ -2015,12 +2015,12 @@ static void TestLinearAllocatorMultiBlock() BufferInfo newBufInfo; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); // Make sure that pool has now one block. vmaGetPoolStats(g_hAllocator, pool, &poolStats); - assert(poolStats.blockCount == 1); + TEST(poolStats.blockCount == 1); // Delete all the remaining buffers, LIFO. 
while(!bufInfo.empty()) @@ -2052,7 +2052,7 @@ static void ManuallyTestLinearAllocator() VmaPoolCreateInfo poolCreateInfo = {}; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); poolCreateInfo.blockSize = 10 * 1024; poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; @@ -2060,7 +2060,7 @@ static void ManuallyTestLinearAllocator() VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo; @@ -2087,19 +2087,19 @@ static void ManuallyTestLinearAllocator() bufCreateInfo.size = 32; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 1024; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 32; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; @@ -2107,19 +2107,19 @@ static void ManuallyTestLinearAllocator() bufCreateInfo.size = 128; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 1024; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - 
assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 16; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); VmaStats currStats; @@ -2170,7 +2170,7 @@ static void BenchmarkAlgorithmsCase(FILE* file, VmaPoolCreateInfo poolCreateInfo = {}; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); poolCreateInfo.blockSize = bufSizeMax * maxBufCapacity; poolCreateInfo.flags |= algorithm; @@ -2178,12 +2178,12 @@ static void BenchmarkAlgorithmsCase(FILE* file, VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Buffer created just to get memory requirements. Never bound to any memory. 
VkBuffer dummyBuffer = VK_NULL_HANDLE; res = vkCreateBuffer(g_hDevice, &sampleBufCreateInfo, nullptr, &dummyBuffer); - assert(res == VK_SUCCESS && dummyBuffer); + TEST(res == VK_SUCCESS && dummyBuffer); VkMemoryRequirements memReq = {}; vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq); @@ -2205,7 +2205,7 @@ static void BenchmarkAlgorithmsCase(FILE* file, { memReq.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin); res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); baseAllocations.push_back(alloc); totalSize += memReq.size; } @@ -2234,7 +2234,7 @@ static void BenchmarkAlgorithmsCase(FILE* file, { memReq.size = bufSizeMin + rand.Generate() % (bufSizeMax - bufSizeMin); res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); testAllocations.push_back(alloc); } allocTotalDuration += std::chrono::high_resolution_clock::now() - allocTimeBeg; @@ -2392,7 +2392,7 @@ static void TestPool_SameSize() { VkBuffer dummyBuffer; res = vkCreateBuffer(g_hDevice, &bufferInfo, nullptr, &dummyBuffer); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkMemoryRequirements memReq; vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq); @@ -2419,7 +2419,7 @@ static void TestPool_SameSize() VmaPool pool; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); vmaSetCurrentFrameIndex(g_hAllocator, 1); @@ -2440,7 +2440,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } @@ -2448,7 +2448,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == 
VK_ERROR_OUT_OF_DEVICE_MEMORY); + TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY); } // Validate that no buffer is lost. Also check that they are not mapped. @@ -2456,8 +2456,8 @@ static void TestPool_SameSize() { VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo); - assert(allocInfo.deviceMemory != VK_NULL_HANDLE); - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.deviceMemory != VK_NULL_HANDLE); + TEST(allocInfo.pMappedData == nullptr); } // Free some percent of random items. @@ -2484,7 +2484,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } } @@ -2505,7 +2505,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } @@ -2514,7 +2514,7 @@ static void TestPool_SameSize() { VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo); - assert(allocInfo.deviceMemory != VK_NULL_HANDLE); + TEST(allocInfo.deviceMemory != VK_NULL_HANDLE); } // Next frame. 
@@ -2525,7 +2525,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } @@ -2534,7 +2534,7 @@ static void TestPool_SameSize() { VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo); - assert(allocInfo.deviceMemory == VK_NULL_HANDLE); + TEST(allocInfo.deviceMemory == VK_NULL_HANDLE); vmaDestroyBuffer(g_hAllocator, items[i].Buf, items[i].Alloc); } items.erase(items.begin(), items.begin() + BUF_COUNT); @@ -2544,7 +2544,7 @@ static void TestPool_SameSize() { VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, items[i].Alloc, &allocInfo); - assert(allocInfo.deviceMemory != VK_NULL_HANDLE); + TEST(allocInfo.deviceMemory != VK_NULL_HANDLE); } // Free one item. @@ -2555,11 +2555,11 @@ static void TestPool_SameSize() { VmaPoolStats poolStats = {}; vmaGetPoolStats(g_hAllocator, pool, &poolStats); - assert(poolStats.allocationCount == items.size()); - assert(poolStats.size = BUF_COUNT * BUF_SIZE); - assert(poolStats.unusedRangeCount == 1); - assert(poolStats.unusedRangeSizeMax == BUF_SIZE); - assert(poolStats.unusedSize == BUF_SIZE); + TEST(poolStats.allocationCount == items.size()); + TEST(poolStats.size = BUF_COUNT * BUF_SIZE); + TEST(poolStats.unusedRangeCount == 1); + TEST(poolStats.unusedRangeSizeMax == BUF_SIZE); + TEST(poolStats.unusedSize == BUF_SIZE); } // Free all remaining items. 
@@ -2572,7 +2572,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } @@ -2591,8 +2591,8 @@ static void TestPool_SameSize() VmaDefragmentationStats defragmentationStats; res = vmaDefragment(g_hAllocator, allocationsToDefragment.data(), items.size(), nullptr, nullptr, &defragmentationStats); - assert(res == VK_SUCCESS); - assert(defragmentationStats.deviceMemoryBlocksFreed == 2); + TEST(res == VK_SUCCESS); + TEST(defragmentationStats.deviceMemoryBlocksFreed == 2); } // Free all remaining items. @@ -2609,7 +2609,7 @@ static void TestPool_SameSize() { BufItem item; res = vmaCreateBuffer(g_hAllocator, &bufferInfo, &allocInfo, &item.Buf, &item.Alloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); items.push_back(item); } @@ -2624,11 +2624,11 @@ static void TestPool_SameSize() // vmaMakePoolAllocationsLost. Only remaining 2 should be lost. size_t lostCount = 0xDEADC0DE; vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount); - assert(lostCount == 2); + TEST(lostCount == 2); // Make another call. Now 0 should be lost. vmaMakePoolAllocationsLost(g_hAllocator, pool, &lostCount); - assert(lostCount == 0); + TEST(lostCount == 0); // Make another call, with null count. Should not crash. vmaMakePoolAllocationsLost(g_hAllocator, pool, nullptr); @@ -2653,7 +2653,7 @@ static void TestPool_SameSize() VmaAllocation alloc = nullptr; res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &alloc, nullptr); - assert(res == VK_ERROR_OUT_OF_DEVICE_MEMORY && alloc == nullptr); + TEST(res == VK_ERROR_OUT_OF_DEVICE_MEMORY && alloc == nullptr); } vmaDestroyPool(g_hAllocator, pool); @@ -2692,11 +2692,11 @@ static void TestAllocationsInitialization() poolCreateInfo.minBlockCount = 1; // To keep memory alive while pool exists. 
poolCreateInfo.maxBlockCount = 1; res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufInfo, &dummyBufAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VmaAllocationCreateInfo bufAllocCreateInfo = {}; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &bufAllocCreateInfo.pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Create one persistently mapped buffer to keep memory of this block mapped, // so that pointer to mapped data will remain (more or less...) valid even @@ -2706,7 +2706,7 @@ static void TestAllocationsInitialization() VkBuffer firstBuf; VmaAllocation firstAlloc; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &firstBuf, &firstAlloc, nullptr); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Test buffers. @@ -2718,13 +2718,13 @@ static void TestAllocationsInitialization() VmaAllocation alloc; VmaAllocationInfo allocInfo; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &bufAllocCreateInfo, &buf, &alloc, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); void* pMappedData; if(!persistentlyMapped) { res = vmaMapMemory(g_hAllocator, alloc, &pMappedData); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } else { @@ -2733,7 +2733,7 @@ static void TestAllocationsInitialization() // Validate initialized content bool valid = ValidatePattern(pMappedData, BUF_SIZE, 0xDC); - assert(valid); + TEST(valid); if(!persistentlyMapped) { @@ -2744,7 +2744,7 @@ static void TestAllocationsInitialization() // Validate freed content valid = ValidatePattern(pMappedData, BUF_SIZE, 0xEF); - assert(valid); + TEST(valid); } vmaDestroyBuffer(g_hAllocator, firstBuf, firstAlloc); @@ -2755,7 +2755,7 @@ static void TestPool_Benchmark( PoolTestResult& outResult, const PoolTestConfig& config) { - assert(config.ThreadCount > 0); + TEST(config.ThreadCount > 0); RandomNumberGenerator mainRand{config.RandSeed}; @@ -2788,7 +2788,7 @@ static void 
TestPool_Benchmark( { VkBuffer dummyBuffer; VkResult res = vkCreateBuffer(g_hDevice, &bufferInfo, nullptr, &dummyBuffer); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkMemoryRequirements memReq; vkGetBufferMemoryRequirements(g_hDevice, dummyBuffer, &memReq); @@ -2801,7 +2801,7 @@ static void TestPool_Benchmark( { VkImage dummyImage; VkResult res = vkCreateImage(g_hDevice, &imageInfo, nullptr, &dummyImage); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkMemoryRequirements memReq; vkGetImageMemoryRequirements(g_hDevice, dummyImage, &memReq); @@ -2825,7 +2825,7 @@ static void TestPool_Benchmark( else if(config.UsesImages()) memoryTypeBits = imageMemoryTypeBits; else - assert(0); + TEST(0); VmaPoolCreateInfo poolCreateInfo = {}; poolCreateInfo.memoryTypeIndex = 0; @@ -2840,7 +2840,7 @@ static void TestPool_Benchmark( VmaPool pool; VkResult res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Start time measurement - after creating pool and initializing data structures. 
time_point timeBeg = std::chrono::high_resolution_clock::now(); @@ -2899,8 +2899,8 @@ static void TestPool_Benchmark( const AllocationSize& allocSize = config.AllocationSizes[allocSizeIndex]; if(allocSize.BufferSizeMax > 0) { - assert(allocSize.BufferSizeMin > 0); - assert(allocSize.ImageSizeMin == 0 && allocSize.ImageSizeMax == 0); + TEST(allocSize.BufferSizeMin > 0); + TEST(allocSize.ImageSizeMin == 0 && allocSize.ImageSizeMax == 0); if(allocSize.BufferSizeMax == allocSize.BufferSizeMin) item.BufferSize = allocSize.BufferSizeMin; else @@ -2911,7 +2911,7 @@ static void TestPool_Benchmark( } else { - assert(allocSize.ImageSizeMin > 0 && allocSize.ImageSizeMax > 0); + TEST(allocSize.ImageSizeMin > 0 && allocSize.ImageSizeMax > 0); if(allocSize.ImageSizeMax == allocSize.ImageSizeMin) item.ImageSize.width = item.ImageSize.height = allocSize.ImageSizeMax; else @@ -2939,7 +2939,7 @@ static void TestPool_Benchmark( } else { - assert(item.ImageSize.width && item.ImageSize.height); + TEST(item.ImageSize.width && item.ImageSize.height); imageInfo.extent.width = item.ImageSize.width; imageInfo.extent.height = item.ImageSize.height; @@ -2966,7 +2966,7 @@ static void TestPool_Benchmark( // Determine which bufs we want to use in this frame. const size_t usedBufCount = (threadRand.Generate() % (config.UsedItemCountMax - config.UsedItemCountMin) + config.UsedItemCountMin) / config.ThreadCount; - assert(usedBufCount < usedItems.size() + unusedItems.size()); + TEST(usedBufCount < usedItems.size() + unusedItems.size()); // Move some used to unused. while(usedBufCount < usedItems.size()) { @@ -3100,7 +3100,7 @@ static void TestPool_Benchmark( } // Execute frames. 
- assert(config.ThreadCount <= MAXIMUM_WAIT_OBJECTS); + TEST(config.ThreadCount <= MAXIMUM_WAIT_OBJECTS); for(uint32_t frameIndex = 0; frameIndex < config.FrameCount; ++frameIndex) { vmaSetCurrentFrameIndex(g_hAllocator, frameIndex); @@ -3186,11 +3186,11 @@ static void TestMapping() VmaPool pool = nullptr; if(testIndex == TEST_POOL) { - assert(memTypeIndex != UINT32_MAX); + TEST(memTypeIndex != UINT32_MAX); VmaPoolCreateInfo poolInfo = {}; poolInfo.memoryTypeIndex = memTypeIndex; res = vmaCreatePool(g_hAllocator, &poolInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; @@ -3213,56 +3213,56 @@ static void TestMapping() { res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &bufferInfos[i].Buffer, &bufferInfos[i].Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(allocInfo.pMappedData == nullptr); + TEST(res == VK_SUCCESS); + TEST(allocInfo.pMappedData == nullptr); memTypeIndex = allocInfo.memoryType; } // Map buffer 0. char* data00 = nullptr; res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data00); - assert(res == VK_SUCCESS && data00 != nullptr); + TEST(res == VK_SUCCESS && data00 != nullptr); data00[0xFFFF] = data00[0]; // Map buffer 0 second time. char* data01 = nullptr; res = vmaMapMemory(g_hAllocator, bufferInfos[0].Allocation, (void**)&data01); - assert(res == VK_SUCCESS && data01 == data00); + TEST(res == VK_SUCCESS && data01 == data00); // Map buffer 1. char* data1 = nullptr; res = vmaMapMemory(g_hAllocator, bufferInfos[1].Allocation, (void**)&data1); - assert(res == VK_SUCCESS && data1 != nullptr); - assert(!MemoryRegionsOverlap(data00, (size_t)bufInfo.size, data1, (size_t)bufInfo.size)); + TEST(res == VK_SUCCESS && data1 != nullptr); + TEST(!MemoryRegionsOverlap(data00, (size_t)bufInfo.size, data1, (size_t)bufInfo.size)); data1[0xFFFF] = data1[0]; // Unmap buffer 0 two times. 
vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation); vmaUnmapMemory(g_hAllocator, bufferInfos[0].Allocation); vmaGetAllocationInfo(g_hAllocator, bufferInfos[0].Allocation, &allocInfo); - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.pMappedData == nullptr); // Unmap buffer 1. vmaUnmapMemory(g_hAllocator, bufferInfos[1].Allocation); vmaGetAllocationInfo(g_hAllocator, bufferInfos[1].Allocation, &allocInfo); - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.pMappedData == nullptr); // Create 3rd buffer - persistently mapped. allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT; res = vmaCreateBuffer(g_hAllocator, &bufInfo, &allocCreateInfo, &bufferInfos[2].Buffer, &bufferInfos[2].Allocation, &allocInfo); - assert(res == VK_SUCCESS && allocInfo.pMappedData != nullptr); + TEST(res == VK_SUCCESS && allocInfo.pMappedData != nullptr); // Map buffer 2. char* data2 = nullptr; res = vmaMapMemory(g_hAllocator, bufferInfos[2].Allocation, (void**)&data2); - assert(res == VK_SUCCESS && data2 == allocInfo.pMappedData); + TEST(res == VK_SUCCESS && data2 == allocInfo.pMappedData); data2[0xFFFF] = data2[0]; // Unmap buffer 2. vmaUnmapMemory(g_hAllocator, bufferInfos[2].Allocation); vmaGetAllocationInfo(g_hAllocator, bufferInfos[2].Allocation, &allocInfo); - assert(allocInfo.pMappedData == data2); + TEST(allocInfo.pMappedData == data2); // Destroy all buffers. 
for(size_t i = 3; i--; ) @@ -3295,11 +3295,11 @@ static void TestMappingMultithreaded() VmaPool pool = nullptr; if(testIndex == TEST_POOL) { - assert(memTypeIndex != UINT32_MAX); + TEST(memTypeIndex != UINT32_MAX); VmaPoolCreateInfo poolInfo = {}; poolInfo.memoryTypeIndex = memTypeIndex; res = vmaCreatePool(g_hAllocator, &poolInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); } VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; @@ -3350,7 +3350,7 @@ static void TestMappingMultithreaded() VmaAllocationInfo allocInfo; VkResult res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &localAllocCreateInfo, &bufInfo.Buffer, &bufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); if(memTypeIndex == UINT32_MAX) memTypeIndex = allocInfo.memoryType; @@ -3360,28 +3360,28 @@ static void TestMappingMultithreaded() if(mode == MODE::PERSISTENTLY_MAPPED) { data = (char*)allocInfo.pMappedData; - assert(data != nullptr); + TEST(data != nullptr); } else if(mode == MODE::MAP_FOR_MOMENT || mode == MODE::MAP_FOR_LONGER || mode == MODE::MAP_TWO_TIMES) { - assert(data == nullptr); + TEST(data == nullptr); res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data); - assert(res == VK_SUCCESS && data != nullptr); + TEST(res == VK_SUCCESS && data != nullptr); if(mode == MODE::MAP_TWO_TIMES) { char* data2 = nullptr; res = vmaMapMemory(g_hAllocator, bufInfo.Allocation, (void**)&data2); - assert(res == VK_SUCCESS && data2 == data); + TEST(res == VK_SUCCESS && data2 == data); } } else if(mode == MODE::DONT_MAP) { - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.pMappedData == nullptr); } else - assert(0); + TEST(0); // Test if reading and writing from the beginning and end of mapped memory doesn't crash. 
if(data) @@ -3394,9 +3394,9 @@ static void TestMappingMultithreaded() VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, bufInfo.Allocation, &allocInfo); if(mode == MODE::MAP_FOR_MOMENT) - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.pMappedData == nullptr); else - assert(allocInfo.pMappedData == data); + TEST(allocInfo.pMappedData == data); } switch(rand.Generate() % 3) @@ -3420,7 +3420,7 @@ static void TestMappingMultithreaded() VmaAllocationInfo allocInfo; vmaGetAllocationInfo(g_hAllocator, bufInfos[bufferIndex].Allocation, &allocInfo); - assert(allocInfo.pMappedData == nullptr); + TEST(allocInfo.pMappedData == nullptr); } vmaDestroyBuffer(g_hAllocator, bufInfos[bufferIndex].Buffer, bufInfos[bufferIndex].Allocation); @@ -3578,7 +3578,7 @@ static void PerformCustomMainTest(FILE* file) Result result{}; VkResult res = MainTest(result, config); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); WriteMainTestResult(file, "Foo", "CustomTest", config, result); } @@ -3868,7 +3868,7 @@ static void PerformMainTests(FILE* file) Result result{}; VkResult res = MainTest(result, config); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); if(file) { WriteMainTestResult(file, CODE_DESCRIPTION, testDescription, config, result); @@ -4123,7 +4123,7 @@ static void BasicTestBuddyAllocator() VmaPoolCreateInfo poolCreateInfo = {}; VkResult res = vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &poolCreateInfo.memoryTypeIndex); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); // Deliberately adding 1023 to test usable size smaller than memory block size. 
poolCreateInfo.blockSize = 1024 * 1024 + 1023; @@ -4132,7 +4132,7 @@ static void BasicTestBuddyAllocator() VmaPool pool = nullptr; res = vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); VkBufferCreateInfo bufCreateInfo = sampleBufCreateInfo; @@ -4146,26 +4146,26 @@ static void BasicTestBuddyAllocator() bufCreateInfo.size = 1024 * 256; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 1024 * 512; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); bufCreateInfo.size = 1024 * 128; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); // Test very small allocation, smaller than minimum node size. bufCreateInfo.size = 1; res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); // Test some small allocation with alignment requirement. 
@@ -4178,8 +4178,8 @@ static void BasicTestBuddyAllocator() newBufInfo.Buffer = VK_NULL_HANDLE; res = vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); - assert(allocInfo.offset % memReq.alignment == 0); + TEST(res == VK_SUCCESS); + TEST(allocInfo.offset % memReq.alignment == 0); bufInfo.push_back(newBufInfo); } @@ -4195,7 +4195,7 @@ static void BasicTestBuddyAllocator() bufCreateInfo.size = 1024 * (rand.Generate() % 32 + 1); res = vmaCreateBuffer(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &newBufInfo.Buffer, &newBufInfo.Allocation, &allocInfo); - assert(res == VK_SUCCESS); + TEST(res == VK_SUCCESS); bufInfo.push_back(newBufInfo); } diff --git a/src/VulkanSample.cpp b/src/VulkanSample.cpp index d33861e..99727f3 100644 --- a/src/VulkanSample.cpp +++ b/src/VulkanSample.cpp @@ -1714,7 +1714,14 @@ static LRESULT WINAPI WndProc(HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) PostMessage(hWnd, WM_CLOSE, 0, 0); break; case 'T': - Test(); + try + { + Test(); + } + catch(const std::exception& ex) + { + printf("ERROR: %s\n", ex.what()); + } break; } return 0; From ad0989bfb4843a683434c14d201ece8c1ce39fc1 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Tue, 9 Oct 2018 13:26:33 +0200 Subject: [PATCH 03/15] Fixed bug in VmaAllocator_T::Defragment. 
--- src/vk_mem_alloc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h index 2c4f5ed..be6deb2 100644 --- a/src/vk_mem_alloc.h +++ b/src/vk_mem_alloc.h @@ -12516,7 +12516,7 @@ VkResult VmaAllocator_T::Defragment( { if(pAllocationsChanged != VMA_NULL) { - memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged)); + memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32)); } if(pDefragmentationStats != VMA_NULL) { From fa87ae34a6165176020bd8624af64e6b300a3d87 Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Mon, 15 Oct 2018 18:15:11 +0200 Subject: [PATCH 04/15] Small addition to documentation. --- docs/html/general_considerations.html | 1 + docs/html/vk__mem__alloc_8h_source.html | 260 ++++++++++++------------ src/vk_mem_alloc.h | 4 + 3 files changed, 135 insertions(+), 130 deletions(-) diff --git a/docs/html/general_considerations.html b/docs/html/general_considerations.html index b3e7dea..c1bf26f 100644 --- a/docs/html/general_considerations.html +++ b/docs/html/general_considerations.html @@ -113,6 +113,7 @@ Features not supported
  • Support for sparse binding and sparse residency. You can still use these features (when supported by the device) with VMA. You just need to do it yourself. Any explicit support for sparse binding/residency would rather require another, higher-level library on top of VMA.
  • Data transfer - issuing commands that transfer data between buffers or images, any usage of VkCommandList or VkQueue and related synchronization is responsibility of the user.
  • Allocations for imported/exported external memory. They tend to require explicit memory type index and dedicated allocation anyway, so they don't interact with main features of this library. Such special purpose allocations should be made manually, using vkCreateBuffer() and vkAllocateMemory().
  • +
  • Handling CPU memory allocation failures. When dynamically creating small C++ objects in CPU memory (not Vulkan memory), allocation failures are not checked and handled gracefully, because that would complicate code significantly and is usually not needed in desktop PC applications anyway.
  • Support for any programming languages other than C/C++. Bindings to other languages are welcomed as external projects.
  • diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 31faa2f..abe9954 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,188 +65,188 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1460 /*
    1461 Define this macro to 0/1 to disable/enable support for recording functionality,
    1462 available through VmaAllocatorCreateInfo::pRecordSettings.
    1463 */
    1464 #ifndef VMA_RECORDING_ENABLED
    1465  #ifdef _WIN32
    1466  #define VMA_RECORDING_ENABLED 1
    1467  #else
    1468  #define VMA_RECORDING_ENABLED 0
    1469  #endif
    1470 #endif
    1471 
    1472 #ifndef NOMINMAX
    1473  #define NOMINMAX // For windows.h
    1474 #endif
    1475 
    1476 #include <vulkan/vulkan.h>
    1477 
    1478 #if VMA_RECORDING_ENABLED
    1479  #include <windows.h>
    1480 #endif
    1481 
    1482 #if !defined(VMA_DEDICATED_ALLOCATION)
    1483  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1484  #define VMA_DEDICATED_ALLOCATION 1
    1485  #else
    1486  #define VMA_DEDICATED_ALLOCATION 0
    1487  #endif
    1488 #endif
    1489 
    1499 VK_DEFINE_HANDLE(VmaAllocator)
    1500 
    1501 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1503  VmaAllocator allocator,
    1504  uint32_t memoryType,
    1505  VkDeviceMemory memory,
    1506  VkDeviceSize size);
    1508 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1509  VmaAllocator allocator,
    1510  uint32_t memoryType,
    1511  VkDeviceMemory memory,
    1512  VkDeviceSize size);
    1513 
    1527 
    1557 
    1560 typedef VkFlags VmaAllocatorCreateFlags;
    1561 
    1566 typedef struct VmaVulkanFunctions {
    1567  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1568  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1569  PFN_vkAllocateMemory vkAllocateMemory;
    1570  PFN_vkFreeMemory vkFreeMemory;
    1571  PFN_vkMapMemory vkMapMemory;
    1572  PFN_vkUnmapMemory vkUnmapMemory;
    1573  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1574  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1575  PFN_vkBindBufferMemory vkBindBufferMemory;
    1576  PFN_vkBindImageMemory vkBindImageMemory;
    1577  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1578  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1579  PFN_vkCreateBuffer vkCreateBuffer;
    1580  PFN_vkDestroyBuffer vkDestroyBuffer;
    1581  PFN_vkCreateImage vkCreateImage;
    1582  PFN_vkDestroyImage vkDestroyImage;
    1583 #if VMA_DEDICATED_ALLOCATION
    1584  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1585  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1586 #endif
    1588 
    1590 typedef enum VmaRecordFlagBits {
    1597 
    1600 typedef VkFlags VmaRecordFlags;
    1601 
    1603 typedef struct VmaRecordSettings
    1604 {
    1614  const char* pFilePath;
    1616 
    1619 {
    1623 
    1624  VkPhysicalDevice physicalDevice;
    1626 
    1627  VkDevice device;
    1629 
    1632 
    1633  const VkAllocationCallbacks* pAllocationCallbacks;
    1635 
    1674  const VkDeviceSize* pHeapSizeLimit;
    1695 
    1697 VkResult vmaCreateAllocator(
    1698  const VmaAllocatorCreateInfo* pCreateInfo,
    1699  VmaAllocator* pAllocator);
    1700 
    1702 void vmaDestroyAllocator(
    1703  VmaAllocator allocator);
    1704 
    1710  VmaAllocator allocator,
    1711  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1712 
    1718  VmaAllocator allocator,
    1719  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1720 
    1728  VmaAllocator allocator,
    1729  uint32_t memoryTypeIndex,
    1730  VkMemoryPropertyFlags* pFlags);
    1731 
    1741  VmaAllocator allocator,
    1742  uint32_t frameIndex);
    1743 
    1746 typedef struct VmaStatInfo
    1747 {
    1749  uint32_t blockCount;
    1755  VkDeviceSize usedBytes;
    1757  VkDeviceSize unusedBytes;
    1760 } VmaStatInfo;
    1761 
    1763 typedef struct VmaStats
    1764 {
    1765  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1766  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1768 } VmaStats;
    1769 
    1771 void vmaCalculateStats(
    1772  VmaAllocator allocator,
    1773  VmaStats* pStats);
    1774 
    1775 #define VMA_STATS_STRING_ENABLED 1
    1776 
    1777 #if VMA_STATS_STRING_ENABLED
    1778 
    1780 
    1782 void vmaBuildStatsString(
    1783  VmaAllocator allocator,
    1784  char** ppStatsString,
    1785  VkBool32 detailedMap);
    1786 
    1787 void vmaFreeStatsString(
    1788  VmaAllocator allocator,
    1789  char* pStatsString);
    1790 
    1791 #endif // #if VMA_STATS_STRING_ENABLED
    1792 
    1801 VK_DEFINE_HANDLE(VmaPool)
    1802 
    1803 typedef enum VmaMemoryUsage
    1804 {
    1853 } VmaMemoryUsage;
    1854 
    1869 
    1924 
    1937 
    1947 
    1954 
    1958 
    1960 {
    1973  VkMemoryPropertyFlags requiredFlags;
    1978  VkMemoryPropertyFlags preferredFlags;
    1986  uint32_t memoryTypeBits;
    1999  void* pUserData;
    2001 
    2018 VkResult vmaFindMemoryTypeIndex(
    2019  VmaAllocator allocator,
    2020  uint32_t memoryTypeBits,
    2021  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2022  uint32_t* pMemoryTypeIndex);
    2023 
    2037  VmaAllocator allocator,
    2038  const VkBufferCreateInfo* pBufferCreateInfo,
    2039  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2040  uint32_t* pMemoryTypeIndex);
    2041 
    2055  VmaAllocator allocator,
    2056  const VkImageCreateInfo* pImageCreateInfo,
    2057  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2058  uint32_t* pMemoryTypeIndex);
    2059 
    2080 
    2097 
    2108 
    2114 
    2117 typedef VkFlags VmaPoolCreateFlags;
    2118 
    2121 typedef struct VmaPoolCreateInfo {
    2136  VkDeviceSize blockSize;
    2165 
    2168 typedef struct VmaPoolStats {
    2171  VkDeviceSize size;
    2174  VkDeviceSize unusedSize;
    2187  VkDeviceSize unusedRangeSizeMax;
    2190  size_t blockCount;
    2191 } VmaPoolStats;
    2192 
    2199 VkResult vmaCreatePool(
    2200  VmaAllocator allocator,
    2201  const VmaPoolCreateInfo* pCreateInfo,
    2202  VmaPool* pPool);
    2203 
    2206 void vmaDestroyPool(
    2207  VmaAllocator allocator,
    2208  VmaPool pool);
    2209 
    2216 void vmaGetPoolStats(
    2217  VmaAllocator allocator,
    2218  VmaPool pool,
    2219  VmaPoolStats* pPoolStats);
    2220 
    2228  VmaAllocator allocator,
    2229  VmaPool pool,
    2230  size_t* pLostAllocationCount);
    2231 
    2246 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2247 
    2272 VK_DEFINE_HANDLE(VmaAllocation)
    2273 
    2274 
    2276 typedef struct VmaAllocationInfo {
    2281  uint32_t memoryType;
    2290  VkDeviceMemory deviceMemory;
    2295  VkDeviceSize offset;
    2300  VkDeviceSize size;
    2314  void* pUserData;
    2316 
    2327 VkResult vmaAllocateMemory(
    2328  VmaAllocator allocator,
    2329  const VkMemoryRequirements* pVkMemoryRequirements,
    2330  const VmaAllocationCreateInfo* pCreateInfo,
    2331  VmaAllocation* pAllocation,
    2332  VmaAllocationInfo* pAllocationInfo);
    2333 
    2341  VmaAllocator allocator,
    2342  VkBuffer buffer,
    2343  const VmaAllocationCreateInfo* pCreateInfo,
    2344  VmaAllocation* pAllocation,
    2345  VmaAllocationInfo* pAllocationInfo);
    2346 
    2348 VkResult vmaAllocateMemoryForImage(
    2349  VmaAllocator allocator,
    2350  VkImage image,
    2351  const VmaAllocationCreateInfo* pCreateInfo,
    2352  VmaAllocation* pAllocation,
    2353  VmaAllocationInfo* pAllocationInfo);
    2354 
    2356 void vmaFreeMemory(
    2357  VmaAllocator allocator,
    2358  VmaAllocation allocation);
    2359 
    2377  VmaAllocator allocator,
    2378  VmaAllocation allocation,
    2379  VmaAllocationInfo* pAllocationInfo);
    2380 
    2395 VkBool32 vmaTouchAllocation(
    2396  VmaAllocator allocator,
    2397  VmaAllocation allocation);
    2398 
    2413  VmaAllocator allocator,
    2414  VmaAllocation allocation,
    2415  void* pUserData);
    2416 
    2428  VmaAllocator allocator,
    2429  VmaAllocation* pAllocation);
    2430 
    2465 VkResult vmaMapMemory(
    2466  VmaAllocator allocator,
    2467  VmaAllocation allocation,
    2468  void** ppData);
    2469 
    2474 void vmaUnmapMemory(
    2475  VmaAllocator allocator,
    2476  VmaAllocation allocation);
    2477 
    2490 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2491 
    2504 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2505 
    2522 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2523 
    2525 typedef struct VmaDefragmentationInfo {
    2530  VkDeviceSize maxBytesToMove;
    2537 
    2539 typedef struct VmaDefragmentationStats {
    2541  VkDeviceSize bytesMoved;
    2543  VkDeviceSize bytesFreed;
    2549 
    2588 VkResult vmaDefragment(
    2589  VmaAllocator allocator,
    2590  VmaAllocation* pAllocations,
    2591  size_t allocationCount,
    2592  VkBool32* pAllocationsChanged,
    2593  const VmaDefragmentationInfo *pDefragmentationInfo,
    2594  VmaDefragmentationStats* pDefragmentationStats);
    2595 
    2608 VkResult vmaBindBufferMemory(
    2609  VmaAllocator allocator,
    2610  VmaAllocation allocation,
    2611  VkBuffer buffer);
    2612 
    2625 VkResult vmaBindImageMemory(
    2626  VmaAllocator allocator,
    2627  VmaAllocation allocation,
    2628  VkImage image);
    2629 
    2656 VkResult vmaCreateBuffer(
    2657  VmaAllocator allocator,
    2658  const VkBufferCreateInfo* pBufferCreateInfo,
    2659  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2660  VkBuffer* pBuffer,
    2661  VmaAllocation* pAllocation,
    2662  VmaAllocationInfo* pAllocationInfo);
    2663 
    2675 void vmaDestroyBuffer(
    2676  VmaAllocator allocator,
    2677  VkBuffer buffer,
    2678  VmaAllocation allocation);
    2679 
    2681 VkResult vmaCreateImage(
    2682  VmaAllocator allocator,
    2683  const VkImageCreateInfo* pImageCreateInfo,
    2684  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2685  VkImage* pImage,
    2686  VmaAllocation* pAllocation,
    2687  VmaAllocationInfo* pAllocationInfo);
    2688 
    2700 void vmaDestroyImage(
    2701  VmaAllocator allocator,
    2702  VkImage image,
    2703  VmaAllocation allocation);
    2704 
    2705 #ifdef __cplusplus
    2706 }
    2707 #endif
    2708 
    2709 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2710 
    2711 // For Visual Studio IntelliSense.
    2712 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2713 #define VMA_IMPLEMENTATION
    2714 #endif
    2715 
    2716 #ifdef VMA_IMPLEMENTATION
    2717 #undef VMA_IMPLEMENTATION
    2718 
    2719 #include <cstdint>
    2720 #include <cstdlib>
    2721 #include <cstring>
    2722 
    2723 /*******************************************************************************
    2724 CONFIGURATION SECTION
    2725 
    2726 Define some of these macros before each #include of this header or change them
    2727 here if you need other then default behavior depending on your environment.
    2728 */
    2729 
    2730 /*
    2731 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2732 internally, like:
    2733 
    2734  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2735 
    2736 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    2737 VmaAllocatorCreateInfo::pVulkanFunctions.
    2738 */
    2739 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2740 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2741 #endif
    2742 
    2743 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2744 //#define VMA_USE_STL_CONTAINERS 1
    2745 
    2746 /* Set this macro to 1 to make the library including and using STL containers:
    2747 std::pair, std::vector, std::list, std::unordered_map.
    2748 
    2749 Set it to 0 or undefined to make the library using its own implementation of
    2750 the containers.
    2751 */
    2752 #if VMA_USE_STL_CONTAINERS
    2753  #define VMA_USE_STL_VECTOR 1
    2754  #define VMA_USE_STL_UNORDERED_MAP 1
    2755  #define VMA_USE_STL_LIST 1
    2756 #endif
    2757 
    2758 #if VMA_USE_STL_VECTOR
    2759  #include <vector>
    2760 #endif
    2761 
    2762 #if VMA_USE_STL_UNORDERED_MAP
    2763  #include <unordered_map>
    2764 #endif
    2765 
    2766 #if VMA_USE_STL_LIST
    2767  #include <list>
    2768 #endif
    2769 
    2770 /*
    2771 Following headers are used in this CONFIGURATION section only, so feel free to
    2772 remove them if not needed.
    2773 */
    2774 #include <cassert> // for assert
    2775 #include <algorithm> // for min, max
    2776 #include <mutex> // for std::mutex
    2777 #include <atomic> // for std::atomic
    2778 
    2779 #ifndef VMA_NULL
    2780  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2781  #define VMA_NULL nullptr
    2782 #endif
    2783 
    2784 #if defined(__APPLE__) || defined(__ANDROID__)
    2785 #include <cstdlib>
// Fallback implementation for platforms whose libc lacks C11 aligned_alloc()
// (Apple, Android), built on top of posix_memalign(). Returns VMA_NULL on failure.
void *aligned_alloc(size_t alignment, size_t size)
{
    // posix_memalign requires alignment to be at least sizeof(void*)
    // (and a power of two) - round small alignments up.
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
    2799 #endif
    2800 
// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:
    2803 
    2804 //#include <malloc.h>
    2805 
    2806 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2807 #ifndef VMA_ASSERT
    2808  #ifdef _DEBUG
    2809  #define VMA_ASSERT(expr) assert(expr)
    2810  #else
    2811  #define VMA_ASSERT(expr)
    2812  #endif
    2813 #endif
    2814 
    2815 // Assert that will be called very often, like inside data structures e.g. operator[].
    2816 // Making it non-empty can make program slow.
    2817 #ifndef VMA_HEAVY_ASSERT
    2818  #ifdef _DEBUG
    2819  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2820  #else
    2821  #define VMA_HEAVY_ASSERT(expr)
    2822  #endif
    2823 #endif
    2824 
    2825 #ifndef VMA_ALIGN_OF
    2826  #define VMA_ALIGN_OF(type) (__alignof(type))
    2827 #endif
    2828 
    2829 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2830  #if defined(_WIN32)
    2831  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2832  #else
    2833  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2834  #endif
    2835 #endif
    2836 
    2837 #ifndef VMA_SYSTEM_FREE
    2838  #if defined(_WIN32)
    2839  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2840  #else
    2841  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2842  #endif
    2843 #endif
    2844 
    2845 #ifndef VMA_MIN
    2846  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2847 #endif
    2848 
    2849 #ifndef VMA_MAX
    2850  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2851 #endif
    2852 
    2853 #ifndef VMA_SWAP
    2854  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2855 #endif
    2856 
    2857 #ifndef VMA_SORT
    2858  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2859 #endif
    2860 
    2861 #ifndef VMA_DEBUG_LOG
    2862  #define VMA_DEBUG_LOG(format, ...)
    2863  /*
    2864  #define VMA_DEBUG_LOG(format, ...) do { \
    2865  printf(format, __VA_ARGS__); \
    2866  printf("\n"); \
    2867  } while(false)
    2868  */
    2869 #endif
    2870 
    2871 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2872 #if VMA_STATS_STRING_ENABLED
    // Number/pointer-to-string helpers printing into a caller-supplied fixed-size
    // buffer. Compiled only when VMA_STATS_STRING_ENABLED (stats-string support).
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
    2885 #endif
    2886 
    2887 #ifndef VMA_MUTEX
    // Default mutex type used when the user doesn't override VMA_MUTEX:
    // a thin facade over std::mutex exposing Lock()/Unlock() naming.
    class VmaMutex
    {
    public:
        VmaMutex() = default;
        ~VmaMutex() = default;
        // Blocks until the mutex is acquired.
        void Lock() { m_Mutex.lock(); }
        // Releases a previously acquired mutex.
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    2898  #define VMA_MUTEX VmaMutex
    2899 #endif
    2900 
    2901 /*
    2902 If providing your own implementation, you need to implement a subset of std::atomic:
    2903 
    2904 - Constructor(uint32_t desired)
    2905 - uint32_t load() const
    2906 - void store(uint32_t desired)
    2907 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2908 */
    2909 #ifndef VMA_ATOMIC_UINT32
    2910  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2911 #endif
    2912 
    2913 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2914 
    2918  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2919 #endif
    2920 
    2921 #ifndef VMA_DEBUG_ALIGNMENT
    2922 
    2926  #define VMA_DEBUG_ALIGNMENT (1)
    2927 #endif
    2928 
    2929 #ifndef VMA_DEBUG_MARGIN
    2930 
    2934  #define VMA_DEBUG_MARGIN (0)
    2935 #endif
    2936 
    2937 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2938 
    2942  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2943 #endif
    2944 
    2945 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2946 
    2951  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2952 #endif
    2953 
    2954 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2955 
    2959  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2960 #endif
    2961 
    2962 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2963 
    2967  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2968 #endif
    2969 
    2970 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2971  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2973 #endif
    2974 
    2975 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2976  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2978 #endif
    2979 
    2980 #ifndef VMA_CLASS_NO_COPY
    2981  #define VMA_CLASS_NO_COPY(className) \
    2982  private: \
    2983  className(const className&) = delete; \
    2984  className& operator=(const className&) = delete;
    2985 #endif
    2986 
// Sentinel frame index - presumably marks an allocation as "lost";
// confirm semantics at the use sites (not visible in this chunk).
static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
// Written into debug margins by VmaWriteMagicValue and checked by
// VmaValidateMagicValue to detect out-of-bounds writes.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

// Fill bytes for VMA_DEBUG_INITIALIZE_ALLOCATIONS - presumably applied to
// freshly created and just-destroyed allocations respectively; confirm at use sites.
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    2994 
    2995 /*******************************************************************************
    2996 END OF CONFIGURATION
    2997 */
    2998 
// All-null callback set: VmaMalloc/VmaFree treat null pfnAllocation/pfnFree
// as "use the system allocator".
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3001 
    3002 // Returns number of bits set to 1 in (v).
    3003 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3004 {
    3005  uint32_t c = v - ((v >> 1) & 0x55555555);
    3006  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3007  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3008  c = ((c >> 8) + c) & 0x00FF00FF;
    3009  c = ((c >> 16) + c) & 0x0000FFFF;
    3010  return c;
    3011 }
    3012 
// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    3014 // Use types like uint32_t, uint64_t as T.
    3015 template <typename T>
    3016 static inline T VmaAlignUp(T val, T align)
    3017 {
    3018  return (val + align - 1) / align * align;
    3019 }
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
    3021 // Use types like uint32_t, uint64_t as T.
    3022 template <typename T>
    3023 static inline T VmaAlignDown(T val, T align)
    3024 {
    3025  return val / align * align;
    3026 }
    3027 
    3028 // Division with mathematical rounding to nearest number.
    3029 template <typename T>
    3030 static inline T VmaRoundDiv(T x, T y)
    3031 {
    3032  return (x + (y / (T)2)) / y;
    3033 }
    3034 
    3035 /*
    3036 Returns true if given number is a power of two.
    3037 T must be unsigned integer number or signed integer but always nonnegative.
    3038 For 0 returns true.
    3039 */
    3040 template <typename T>
    3041 inline bool VmaIsPow2(T x)
    3042 {
    3043  return (x & (x-1)) == 0;
    3044 }
    3045 
    3046 // Returns smallest power of 2 greater or equal to v.
    3047 static inline uint32_t VmaNextPow2(uint32_t v)
    3048 {
    3049  v--;
    3050  v |= v >> 1;
    3051  v |= v >> 2;
    3052  v |= v >> 4;
    3053  v |= v >> 8;
    3054  v |= v >> 16;
    3055  v++;
    3056  return v;
    3057 }
    3058 static inline uint64_t VmaNextPow2(uint64_t v)
    3059 {
    3060  v--;
    3061  v |= v >> 1;
    3062  v |= v >> 2;
    3063  v |= v >> 4;
    3064  v |= v >> 8;
    3065  v |= v >> 16;
    3066  v |= v >> 32;
    3067  v++;
    3068  return v;
    3069 }
    3070 
    3071 // Returns largest power of 2 less or equal to v.
    3072 static inline uint32_t VmaPrevPow2(uint32_t v)
    3073 {
    3074  v |= v >> 1;
    3075  v |= v >> 2;
    3076  v |= v >> 4;
    3077  v |= v >> 8;
    3078  v |= v >> 16;
    3079  v = v ^ (v >> 1);
    3080  return v;
    3081 }
    3082 static inline uint64_t VmaPrevPow2(uint64_t v)
    3083 {
    3084  v |= v >> 1;
    3085  v |= v >> 2;
    3086  v |= v >> 4;
    3087  v |= v >> 8;
    3088  v |= v >> 16;
    3089  v |= v >> 32;
    3090  v = v ^ (v >> 1);
    3091  return v;
    3092 }
    3093 
    3094 static inline bool VmaStrIsEmpty(const char* pStr)
    3095 {
    3096  return pStr == VMA_NULL || *pStr == '\0';
    3097 }
    3098 
// Maps a pool-algorithm value to a human-readable name (for stats/logging).
static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    // NOTE(review): the two returns below have no `case` labels in front of
    // them, so they are unreachable as written; the labels for the linear and
    // buddy algorithm flag bits appear to have been lost - confirm against the
    // original sources before relying on this function.
    return "Linear";
    return "Buddy";
    case 0:
        return "Default";
    default:
        // Unknown algorithm value - programmer error.
        VMA_ASSERT(0);
        return "";
    }
}
    3114 
    3115 #ifndef VMA_SORT
    3116 
    3117 template<typename Iterator, typename Compare>
    3118 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3119 {
    3120  Iterator centerValue = end; --centerValue;
    3121  Iterator insertIndex = beg;
    3122  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3123  {
    3124  if(cmp(*memTypeIndex, *centerValue))
    3125  {
    3126  if(insertIndex != memTypeIndex)
    3127  {
    3128  VMA_SWAP(*memTypeIndex, *insertIndex);
    3129  }
    3130  ++insertIndex;
    3131  }
    3132  }
    3133  if(insertIndex != centerValue)
    3134  {
    3135  VMA_SWAP(*insertIndex, *centerValue);
    3136  }
    3137  return insertIndex;
    3138 }
    3139 
    3140 template<typename Iterator, typename Compare>
    3141 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3142 {
    3143  if(beg < end)
    3144  {
    3145  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3146  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3147  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3148  }
    3149 }
    3150 
    3151 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3152 
    3153 #endif // #ifndef VMA_SORT
    3154 
    3155 /*
    3156 Returns true if two memory blocks occupy overlapping pages.
    3157 ResourceA must be in less memory offset than ResourceB.
    3158 
    3159 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3160 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3161 */
    3162 static inline bool VmaBlocksOnSamePage(
    3163  VkDeviceSize resourceAOffset,
    3164  VkDeviceSize resourceASize,
    3165  VkDeviceSize resourceBOffset,
    3166  VkDeviceSize pageSize)
    3167 {
    3168  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3169  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3170  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3171  VkDeviceSize resourceBStart = resourceBOffset;
    3172  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3173  return resourceAEndPage == resourceBStartPage;
    3174 }
    3175 
// Category of content stored in a suballocation. Used by
// VmaIsBufferImageGranularityConflict() to decide whether two neighboring
// suballocations must respect VkPhysicalDeviceLimits::bufferImageGranularity.
enum VmaSuballocationType
{
    // Unused region.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // Allocated without buffer/image usage information - treated conservatively.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Image with unknown tiling - treated conservatively.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3186 
    3187 /*
    3188 Returns true if given suballocation types could conflict and must respect
    3189 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3190 or linear image and another one is optimal image. If type is unknown, behave
    3191 conservatively.
    3192 */
    3193 static inline bool VmaIsBufferImageGranularityConflict(
    3194  VmaSuballocationType suballocType1,
    3195  VmaSuballocationType suballocType2)
    3196 {
    3197  if(suballocType1 > suballocType2)
    3198  {
    3199  VMA_SWAP(suballocType1, suballocType2);
    3200  }
    3201 
    3202  switch(suballocType1)
    3203  {
    3204  case VMA_SUBALLOCATION_TYPE_FREE:
    3205  return false;
    3206  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3207  return true;
    3208  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3209  return
    3210  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3211  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3212  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3213  return
    3214  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3215  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3216  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3217  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3218  return
    3219  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3220  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3221  return false;
    3222  default:
    3223  VMA_ASSERT(0);
    3224  return true;
    3225  }
    3226 }
    3227 
    3228 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3229 {
    3230  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3231  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3232  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3233  {
    3234  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3235  }
    3236 }
    3237 
    3238 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3239 {
    3240  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3241  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3242  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3243  {
    3244  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3245  {
    3246  return false;
    3247  }
    3248  }
    3249  return true;
    3250 }
    3251 
    3252 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false the lock is a no-op - presumably used to skip
    // synchronization when the owner is configured single-threaded; confirm at call sites.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    // Null when useMutex was false; otherwise the mutex currently held.
    VMA_MUTEX* m_pMutex;
};
    3277 
    3278 #if VMA_DEBUG_GLOBAL_MUTEX
    3279  static VMA_MUTEX gDebugGlobalMutex;
    3280  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3281 #else
    3282  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3283 #endif
    3284 
// Minimum size of a free suballocation to register it in the free suballocation collection.
// Presumably keeps very small fragments out of the size-sorted lookup structure;
// confirm at the use sites (not visible in this chunk).
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3287 
    3288 /*
    3289 Performs binary search and returns iterator to first element that is greater or
    3290 equal to (key), according to comparison (cmp).
    3291 
    3292 Cmp should return true if first argument is less than second argument.
    3293 
    3294 Returned value is the found element, if present in the collection or place where
    3295 new element with value (key) should be inserted.
    3296 */
    3297 template <typename CmpLess, typename IterT, typename KeyT>
    3298 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3299 {
    3300  size_t down = 0, up = (end - beg);
    3301  while(down < up)
    3302  {
    3303  const size_t mid = (down + up) / 2;
    3304  if(cmp(*(beg+mid), key))
    3305  {
    3306  down = mid + 1;
    3307  }
    3308  else
    3309  {
    3310  up = mid;
    3311  }
    3312  }
    3313  return beg + down;
    3314 }
    3315 
    3317 // Memory allocation
    3318 
    3319 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3320 {
    3321  if((pAllocationCallbacks != VMA_NULL) &&
    3322  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3323  {
    3324  return (*pAllocationCallbacks->pfnAllocation)(
    3325  pAllocationCallbacks->pUserData,
    3326  size,
    3327  alignment,
    3328  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3329  }
    3330  else
    3331  {
    3332  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3333  }
    3334 }
    3335 
    3336 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3337 {
    3338  if((pAllocationCallbacks != VMA_NULL) &&
    3339  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3340  {
    3341  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3342  }
    3343  else
    3344  {
    3345  VMA_SYSTEM_FREE(ptr);
    3346  }
    3347 }
    3348 
    3349 template<typename T>
    3350 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3351 {
    3352  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3353 }
    3354 
    3355 template<typename T>
    3356 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3357 {
    3358  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3359 }
    3360 
    3361 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3362 
    3363 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3364 
// Destroys and frees a single object previously created with vma_new().
// Note: ptr must not be null - the destructor is invoked unconditionally.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3371 
    3372 template<typename T>
    3373 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3374 {
    3375  if(ptr != VMA_NULL)
    3376  {
    3377  for(size_t i = count; i--; )
    3378  {
    3379  ptr[i].~T();
    3380  }
    3381  VmaFree(pAllocationCallbacks, ptr);
    3382  }
    3383 }
    3384 
    3385 // STL-compatible allocator.
// Minimal STL-compatible allocator that routes all storage requests through
// the library's VkAllocationCallbacks-aware VmaMalloc/VmaFree.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the Allocator named requirement.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    // Allocates raw storage for n objects of T (no construction).
    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they use the same callback set.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3412 
    3413 #if VMA_USE_STL_VECTOR
    3414 
    3415 #define VmaVector std::vector
    3416 
// Inserts (item) into (vec) at position (index), shifting later elements up.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
    3422 
// Removes the element at position (index) from (vec), shifting later elements down.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
    3428 
    3429 #else // #if VMA_USE_STL_VECTOR
    3430 
    3431 /* Class with interface compatible with subset of std::vector.
    3432 T must be POD because constructors and destructors are not called and memcpy is
    3433 used for these objects. */
    3434 template<typename T, typename AllocatorT>
    3435 class VmaVector
    3436 {
    3437 public:
    3438  typedef T value_type;
    3439 
    3440  VmaVector(const AllocatorT& allocator) :
    3441  m_Allocator(allocator),
    3442  m_pArray(VMA_NULL),
    3443  m_Count(0),
    3444  m_Capacity(0)
    3445  {
    3446  }
    3447 
    3448  VmaVector(size_t count, const AllocatorT& allocator) :
    3449  m_Allocator(allocator),
    3450  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3451  m_Count(count),
    3452  m_Capacity(count)
    3453  {
    3454  }
    3455 
    3456  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3457  m_Allocator(src.m_Allocator),
    3458  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3459  m_Count(src.m_Count),
    3460  m_Capacity(src.m_Count)
    3461  {
    3462  if(m_Count != 0)
    3463  {
    3464  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3465  }
    3466  }
    3467 
    3468  ~VmaVector()
    3469  {
    3470  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3471  }
    3472 
    3473  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3474  {
    3475  if(&rhs != this)
    3476  {
    3477  resize(rhs.m_Count);
    3478  if(m_Count != 0)
    3479  {
    3480  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3481  }
    3482  }
    3483  return *this;
    3484  }
    3485 
    3486  bool empty() const { return m_Count == 0; }
    3487  size_t size() const { return m_Count; }
    3488  T* data() { return m_pArray; }
    3489  const T* data() const { return m_pArray; }
    3490 
    3491  T& operator[](size_t index)
    3492  {
    3493  VMA_HEAVY_ASSERT(index < m_Count);
    3494  return m_pArray[index];
    3495  }
    3496  const T& operator[](size_t index) const
    3497  {
    3498  VMA_HEAVY_ASSERT(index < m_Count);
    3499  return m_pArray[index];
    3500  }
    3501 
    3502  T& front()
    3503  {
    3504  VMA_HEAVY_ASSERT(m_Count > 0);
    3505  return m_pArray[0];
    3506  }
    3507  const T& front() const
    3508  {
    3509  VMA_HEAVY_ASSERT(m_Count > 0);
    3510  return m_pArray[0];
    3511  }
    3512  T& back()
    3513  {
    3514  VMA_HEAVY_ASSERT(m_Count > 0);
    3515  return m_pArray[m_Count - 1];
    3516  }
    3517  const T& back() const
    3518  {
    3519  VMA_HEAVY_ASSERT(m_Count > 0);
    3520  return m_pArray[m_Count - 1];
    3521  }
    3522 
    3523  void reserve(size_t newCapacity, bool freeMemory = false)
    3524  {
    3525  newCapacity = VMA_MAX(newCapacity, m_Count);
    3526 
    3527  if((newCapacity < m_Capacity) && !freeMemory)
    3528  {
    3529  newCapacity = m_Capacity;
    3530  }
    3531 
    3532  if(newCapacity != m_Capacity)
    3533  {
    3534  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3535  if(m_Count != 0)
    3536  {
    3537  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3538  }
    3539  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3540  m_Capacity = newCapacity;
    3541  m_pArray = newArray;
    3542  }
    3543  }
    3544 
    3545  void resize(size_t newCount, bool freeMemory = false)
    3546  {
    3547  size_t newCapacity = m_Capacity;
    3548  if(newCount > m_Capacity)
    3549  {
    3550  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3551  }
    3552  else if(freeMemory)
    3553  {
    3554  newCapacity = newCount;
    3555  }
    3556 
    3557  if(newCapacity != m_Capacity)
    3558  {
    3559  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3560  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3561  if(elementsToCopy != 0)
    3562  {
    3563  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3564  }
    3565  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3566  m_Capacity = newCapacity;
    3567  m_pArray = newArray;
    3568  }
    3569 
    3570  m_Count = newCount;
    3571  }
    3572 
    3573  void clear(bool freeMemory = false)
    3574  {
    3575  resize(0, freeMemory);
    3576  }
    3577 
    3578  void insert(size_t index, const T& src)
    3579  {
    3580  VMA_HEAVY_ASSERT(index <= m_Count);
    3581  const size_t oldCount = size();
    3582  resize(oldCount + 1);
    3583  if(index < oldCount)
    3584  {
    3585  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3586  }
    3587  m_pArray[index] = src;
    3588  }
    3589 
    3590  void remove(size_t index)
    3591  {
    3592  VMA_HEAVY_ASSERT(index < m_Count);
    3593  const size_t oldCount = size();
    3594  if(index < oldCount - 1)
    3595  {
    3596  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3597  }
    3598  resize(oldCount - 1);
    3599  }
    3600 
    3601  void push_back(const T& src)
    3602  {
    3603  const size_t newIndex = size();
    3604  resize(newIndex + 1);
    3605  m_pArray[newIndex] = src;
    3606  }
    3607 
    3608  void pop_back()
    3609  {
    3610  VMA_HEAVY_ASSERT(m_Count > 0);
    3611  resize(size() - 1);
    3612  }
    3613 
    3614  void push_front(const T& src)
    3615  {
    3616  insert(0, src);
    3617  }
    3618 
    3619  void pop_front()
    3620  {
    3621  VMA_HEAVY_ASSERT(m_Count > 0);
    3622  remove(0);
    3623  }
    3624 
    3625  typedef T* iterator;
    3626 
    3627  iterator begin() { return m_pArray; }
    3628  iterator end() { return m_pArray + m_Count; }
    3629 
    3630 private:
    3631  AllocatorT m_Allocator;
    3632  T* m_pArray;
    3633  size_t m_Count;
    3634  size_t m_Capacity;
    3635 };
    3636 
// Inserts (item) into (vec) at position (index) - VmaVector overload.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3642 
// Removes the element at position (index) from (vec) - VmaVector overload.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3648 
    3649 #endif // #if VMA_USE_STL_VECTOR
    3650 
    3651 template<typename CmpLess, typename VectorT>
    3652 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3653 {
    3654  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3655  vector.data(),
    3656  vector.data() + vector.size(),
    3657  value,
    3658  CmpLess()) - vector.data();
    3659  VmaVectorInsert(vector, indexToInsert, value);
    3660  return indexToInsert;
    3661 }
    3662 
    3663 template<typename CmpLess, typename VectorT>
    3664 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3665 {
    3666  CmpLess comparator;
    3667  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3668  vector.begin(),
    3669  vector.end(),
    3670  value,
    3671  comparator);
    3672  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3673  {
    3674  size_t indexToRemove = it - vector.begin();
    3675  VmaVectorRemove(vector, indexToRemove);
    3676  return true;
    3677  }
    3678  return false;
    3679 }
    3680 
    3681 template<typename CmpLess, typename IterT, typename KeyT>
    3682 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3683 {
    3684  CmpLess comparator;
    3685  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3686  beg, end, value, comparator);
    3687  if(it == end ||
    3688  (!comparator(*it, value) && !comparator(value, *it)))
    3689  {
    3690  return it;
    3691  }
    3692  return end;
    3693 }
    3694 
    3696 // class VmaPoolAllocator
    3697 
    3698 /*
    3699 Allocator for objects of type T using a list of arrays (pools) to speed up
    3700 allocation. Number of elements that can be allocated is not bounded because
    3701 allocator can create multiple blocks.
    3702 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; any outstanding items become dangling.
    void Clear();
    // Returns a slot for one T; no constructor is run here.
    T* Alloc();
    // Returns a slot previously obtained from Alloc(); no destructor is run here.
    void Free(T* ptr);

private:
    // A slot is either a live value or a link in the block's free list.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-size array of slots plus the head of its intrusive free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX means the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3733 
// Stores configuration only - no block is allocated until the first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3742 
// Releases all blocks via Clear().
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3748 
    3749 template<typename T>
    3750 void VmaPoolAllocator<T>::Clear()
    3751 {
    3752  for(size_t i = m_ItemBlocks.size(); i--; )
    3753  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3754  m_ItemBlocks.clear();
    3755 }
    3756 
    3757 template<typename T>
    3758 T* VmaPoolAllocator<T>::Alloc()
    3759 {
    3760  for(size_t i = m_ItemBlocks.size(); i--; )
    3761  {
    3762  ItemBlock& block = m_ItemBlocks[i];
    3763  // This block has some free items: Use first one.
    3764  if(block.FirstFreeIndex != UINT32_MAX)
    3765  {
    3766  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3767  block.FirstFreeIndex = pItem->NextFreeIndex;
    3768  return &pItem->Value;
    3769  }
    3770  }
    3771 
    3772  // No block has free item: Create new one and use it.
    3773  ItemBlock& newBlock = CreateNewBlock();
    3774  Item* const pItem = &newBlock.pItems[0];
    3775  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3776  return &pItem->Value;
    3777 }
    3778 
// Returns the slot holding *ptr to the free list of the block that owns it.
// Does not call the destructor of T; the caller is responsible for that.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy instead of a cast: T* and Item* alias the same address
        // (Item::Value is the T), and this sidesteps strict-aliasing concerns.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto the front of this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    3802 
    3803 template<typename T>
    3804 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3805 {
    3806  ItemBlock newBlock = {
    3807  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3808 
    3809  m_ItemBlocks.push_back(newBlock);
    3810 
    3811  // Setup singly-linked list of all free items in this block.
    3812  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3813  newBlock.pItems[i].NextFreeIndex = i + 1;
    3814  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3815  return m_ItemBlocks.back();
    3816 }
    3817 
    3819 // class VmaRawList, VmaList
    3820 
    3821 #if VMA_USE_STL_LIST
    3822 
    3823 #define VmaList std::list
    3824 
    3825 #else // #if VMA_USE_STL_LIST
    3826 
// Node of VmaRawList: payload plus prev/next links.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Previous node, or VMA_NULL at the front of the list.
    VmaListItem* pNext; // Next node, or VMA_NULL at the back of the list.
    T Value;
};
    3834 
// Doubly linked list.
// Low-level list of VmaListItem<T> nodes; node storage comes from an internal
// VmaPoolAllocator. VmaList below wraps this with an STL-like interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all items and leaves the list empty.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // First/last node. Only meaningful when the list is not empty.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push*/Insert* return the new node; its Value is left unassigned by the
    // overloads that take no value.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Owns all node memory.
    ItemType* m_pFront; // First node, VMA_NULL when empty.
    ItemType* m_pBack;  // Last node, VMA_NULL when empty.
    size_t m_Count;
};
    3879 
// Constructs an empty list. Nodes are pool-allocated 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3889 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's destructor releases all node memory in bulk.
}
    3896 
    3897 template<typename T>
    3898 void VmaRawList<T>::Clear()
    3899 {
    3900  if(IsEmpty() == false)
    3901  {
    3902  ItemType* pItem = m_pBack;
    3903  while(pItem != VMA_NULL)
    3904  {
    3905  ItemType* const pPrevItem = pItem->pPrev;
    3906  m_ItemAllocator.Free(pItem);
    3907  pItem = pPrevItem;
    3908  }
    3909  m_pFront = VMA_NULL;
    3910  m_pBack = VMA_NULL;
    3911  m_Count = 0;
    3912  }
    3913 }
    3914 
    3915 template<typename T>
    3916 VmaListItem<T>* VmaRawList<T>::PushBack()
    3917 {
    3918  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3919  pNewItem->pNext = VMA_NULL;
    3920  if(IsEmpty())
    3921  {
    3922  pNewItem->pPrev = VMA_NULL;
    3923  m_pFront = pNewItem;
    3924  m_pBack = pNewItem;
    3925  m_Count = 1;
    3926  }
    3927  else
    3928  {
    3929  pNewItem->pPrev = m_pBack;
    3930  m_pBack->pNext = pNewItem;
    3931  m_pBack = pNewItem;
    3932  ++m_Count;
    3933  }
    3934  return pNewItem;
    3935 }
    3936 
    3937 template<typename T>
    3938 VmaListItem<T>* VmaRawList<T>::PushFront()
    3939 {
    3940  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3941  pNewItem->pPrev = VMA_NULL;
    3942  if(IsEmpty())
    3943  {
    3944  pNewItem->pNext = VMA_NULL;
    3945  m_pFront = pNewItem;
    3946  m_pBack = pNewItem;
    3947  m_Count = 1;
    3948  }
    3949  else
    3950  {
    3951  pNewItem->pNext = m_pFront;
    3952  m_pFront->pPrev = pNewItem;
    3953  m_pFront = pNewItem;
    3954  ++m_Count;
    3955  }
    3956  return pNewItem;
    3957 }
    3958 
    3959 template<typename T>
    3960 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    3961 {
    3962  ItemType* const pNewItem = PushBack();
    3963  pNewItem->Value = value;
    3964  return pNewItem;
    3965 }
    3966 
    3967 template<typename T>
    3968 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    3969 {
    3970  ItemType* const pNewItem = PushFront();
    3971  pNewItem->Value = value;
    3972  return pNewItem;
    3973 }
    3974 
    3975 template<typename T>
    3976 void VmaRawList<T>::PopBack()
    3977 {
    3978  VMA_HEAVY_ASSERT(m_Count > 0);
    3979  ItemType* const pBackItem = m_pBack;
    3980  ItemType* const pPrevItem = pBackItem->pPrev;
    3981  if(pPrevItem != VMA_NULL)
    3982  {
    3983  pPrevItem->pNext = VMA_NULL;
    3984  }
    3985  m_pBack = pPrevItem;
    3986  m_ItemAllocator.Free(pBackItem);
    3987  --m_Count;
    3988 }
    3989 
    3990 template<typename T>
    3991 void VmaRawList<T>::PopFront()
    3992 {
    3993  VMA_HEAVY_ASSERT(m_Count > 0);
    3994  ItemType* const pFrontItem = m_pFront;
    3995  ItemType* const pNextItem = pFrontItem->pNext;
    3996  if(pNextItem != VMA_NULL)
    3997  {
    3998  pNextItem->pPrev = VMA_NULL;
    3999  }
    4000  m_pFront = pNextItem;
    4001  m_ItemAllocator.Free(pFrontItem);
    4002  --m_Count;
    4003 }
    4004 
// Unlinks pItem from anywhere in the list and frees it. pItem must be a live
// node of this list (asserted indirectly via the front/back checks).
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Detach from the previous node, or advance the front pointer if pItem was first.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Detach from the next node, or retreat the back pointer if pItem was last.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4034 
// Inserts a new node (Value unassigned) immediately before pItem and returns it.
// pItem == VMA_NULL means insertion at the end, equivalent to PushBack().
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front: the new node becomes the new front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
    4060 
// Inserts a new node (Value unassigned) immediately after pItem and returns it.
// pItem == VMA_NULL means insertion at the beginning, equivalent to PushFront().
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back: the new node becomes the new back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4086 
    4087 template<typename T>
    4088 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4089 {
    4090  ItemType* const newItem = InsertBefore(pItem);
    4091  newItem->Value = value;
    4092  return newItem;
    4093 }
    4094 
    4095 template<typename T>
    4096 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4097 {
    4098  ItemType* const newItem = InsertAfter(pItem);
    4099  newItem->Value = value;
    4100  return newItem;
    4101 }
    4102 
// std::list-like wrapper over VmaRawList exposing a minimal subset of the
// standard interface (iterators, push_back, insert, erase, clear).
// AllocatorT must provide an m_pCallbacks member (VmaStlAllocator does).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional mutable iterator. A null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() yields an iterator to the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            // Comparing iterators from different lists is a bug.
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;   // Owning list; needed to resolve --end().
        VmaListItem<T>* m_pItem;  // Current node; VMA_NULL means end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Bidirectional read-only iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing cend() yields an iterator to the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before position it; returns iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4287 
    4288 #endif // #if VMA_USE_STL_LIST
    4289 
    4291 // class VmaMap
    4292 
    4293 // Unused in this version.
    4294 #if 0
    4295 
    4296 #if VMA_USE_STL_UNORDERED_MAP
    4297 
    4298 #define VmaPair std::pair
    4299 
    4300 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4301  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4302 
    4303 #else // #if VMA_USE_STL_UNORDERED_MAP
    4304 
// Minimal stand-in for std::pair, used as the element type of VmaMap below.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4314 
    4315 /* Class compatible with subset of interface of std::unordered_map.
    4316 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4317 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Iterators are raw pointers into m_Vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts pair at the position that keeps m_Vector sorted by key.
    void insert(const PairType& pair);
    // Binary-searches by key; returns end() when not found.
    iterator find(const KeyT& key);
    // Removes the element at it; subsequent iterators are invalidated.
    void erase(iterator it);

private:
    // Elements kept sorted by .first so find() can use binary search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4337 
    4338 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4339 
// Orders pairs by key only. The second overload lets binary search compare a
// stored pair directly against a bare key without constructing a pair.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4352 
// Inserts pair at the first position whose key is not less than pair's key,
// preserving the sorted order of m_Vector. Duplicate keys are not rejected.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4363 
// Binary-searches the sorted vector for key. Returns a pointer to the matching
// pair, or end() when the lower bound is past the end or holds a different key.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4381 
// Removes the element at it, shifting the remaining elements down by one.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4387 
    4388 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4389 
    4390 #endif // #if 0
    4391 
    4393 
class VmaDeviceMemoryBlock;

// Selects between flush and invalidate for a cache maintenance operation.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4397 
/*
Internal representation of a single allocation (what a VmaAllocation handle
points to). An allocation is either placed inside a VmaDeviceMemoryBlock
(ALLOCATION_TYPE_BLOCK) or owns its own VkDeviceMemory
(ALLOCATION_TYPE_DEDICATED); the union at the bottom holds the per-variant
state, with m_Type selecting the active member.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit of m_MapCount that marks a persistently mapped allocation.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Constructs in ALLOCATION_TYPE_NONE state; one of the Init* methods must
    // be called before the allocation is used (their asserts enforce this).
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns this allocation into a block allocation at the given offset inside
    // block. Must currently be in ALLOCATION_TYPE_NONE state.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this allocation directly in the "lost" state: a block
    // allocation with no block. Requires m_LastUseFrameIndex to already be
    // VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves an existing block allocation to another block/offset.
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Only valid for block allocations (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated
    // allocation. Only valid for dedicated allocations (asserted).
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be set only once (asserted).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type (BLOCK vs DEDICATED).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4614 
    4615 /*
    4616 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4617 allocated memory block or free.
    4618 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset from the beginning of the VkDeviceMemory block.
    VkDeviceSize size;   // Size of this region in bytes.
    VmaAllocation hAllocation; // Allocation occupying this region (presumably null for a free range — see type).
    VmaSuballocationType type;
};
    4626 
    4627 // Comparator for offsets.
    4628 struct VmaSuballocationOffsetLess
    4629 {
    4630  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4631  {
    4632  return lhs.offset < rhs.offset;
    4633  }
    4634 };
    4635 struct VmaSuballocationOffsetGreater
    4636 {
    4637  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4638  {
    4639  return lhs.offset > rhs.offset;
    4640  }
    4641 };
    4642 
// List of suballocations describing the regions of one memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4647 
    4648 /*
    4649 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4650 
    4651 If canMakeOtherLost was false:
    4652 - item points to a FREE suballocation.
    4653 - itemsToMakeLostCount is 0.
    4654 
    4655 If canMakeOtherLost was true:
    4656 - item points to first of sequence of suballocations, which are either FREE,
    4657  or point to VmaAllocations that can become lost.
    4658 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4659  the requested allocation to succeed.
    4660 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;      // Proposed offset of the new allocation inside the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Metadata-implementation-specific payload.

    // Total cost of accepting this request: bytes of existing allocations that
    // would be lost plus a fixed per-allocation penalty.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4675 
    4676 /*
    4677 Data structure used for bookkeeping of allocations and unused ranges of memory
    4678 in a single VkDeviceMemory block.
    4679 */
// Abstract base class: concrete subclasses implement a particular
// bookkeeping algorithm for suballocations within one VkDeviceMemory block.
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for implementing PrintDetailedMap() in derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4761 
// For use inside Validate() implementations: asserts `cond` (with the condition
// text in the message) and makes the enclosing function return false, so a
// broken invariant both fires VMA_ASSERT and reports the metadata as invalid.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4766 
// Default metadata implementation: keeps suballocations in a list
// (m_Suballocations) plus a size-sorted index of large-enough free ranges
// (m_FreeSuballocationsBySize) for best-fit style searches.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Total entries minus free entries = live allocations.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    // Number of free suballocations currently in m_Suballocations.
    uint32_t m_FreeCount;
    // Cached sum of sizes of all free suballocations.
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    // Checks internal consistency of m_FreeSuballocationsBySize.
    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4857 
    4858 /*
    4859 Allocations and their references in internal data structure look like this:
    4860 
    4861 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4862 
    4863  0 +-------+
    4864  | |
    4865  | |
    4866  | |
    4867  +-------+
    4868  | Alloc | 1st[m_1stNullItemsBeginCount]
    4869  +-------+
    4870  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4871  +-------+
    4872  | ... |
    4873  +-------+
    4874  | Alloc | 1st[1st.size() - 1]
    4875  +-------+
    4876  | |
    4877  | |
    4878  | |
    4879 GetSize() +-------+
    4880 
    4881 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4882 
    4883  0 +-------+
    4884  | Alloc | 2nd[0]
    4885  +-------+
    4886  | Alloc | 2nd[1]
    4887  +-------+
    4888  | ... |
    4889  +-------+
    4890  | Alloc | 2nd[2nd.size() - 1]
    4891  +-------+
    4892  | |
    4893  | |
    4894  | |
    4895  +-------+
    4896  | Alloc | 1st[m_1stNullItemsBeginCount]
    4897  +-------+
    4898  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4899  +-------+
    4900  | ... |
    4901  +-------+
    4902  | Alloc | 1st[1st.size() - 1]
    4903  +-------+
    4904  | |
    4905 GetSize() +-------+
    4906 
    4907 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4908 
    4909  0 +-------+
    4910  | |
    4911  | |
    4912  | |
    4913  +-------+
    4914  | Alloc | 1st[m_1stNullItemsBeginCount]
    4915  +-------+
    4916  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4917  +-------+
    4918  | ... |
    4919  +-------+
    4920  | Alloc | 1st[1st.size() - 1]
    4921  +-------+
    4922  | |
    4923  | |
    4924  | |
    4925  +-------+
    4926  | Alloc | 2nd[2nd.size() - 1]
    4927  +-------+
    4928  | ... |
    4929  +-------+
    4930  | Alloc | 2nd[1]
    4931  +-------+
    4932  | Alloc | 2nd[0]
    4933 GetSize() +-------+
    4934 
    4935 */
// Metadata implementation for linear allocation: supports the three layouts
// described in the diagram comment above (empty 2nd vector, ring buffer,
// double stack), selected at runtime via m_2ndVectorMode.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    // Cached sum of sizes of all free ranges.
    VkDeviceSize m_SumFreeSize;
    // Physical storage; which one plays the role of "1st" is decided by m_1stVectorIndex.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Heuristic deciding whether the 1st vector accumulated enough null items to compact.
    bool ShouldCompact1st() const;
    // Removes null items / swaps vectors after a Free, restoring invariants.
    void CleanupAfterFree();
};
    5034 
    5035 /*
    5036 - GetSize() is the original size of allocated memory block.
    5037 - m_UsableSize is this size aligned down to a power of two.
    5038  All allocations and calculations happen relative to m_UsableSize.
    5039 - GetUnusableSize() is the difference between them.
 It is reported as separate, unused range, not available for allocations.
    5041 
    5042 Node at level 0 has size = m_UsableSize.
    5043 Each next level contains nodes with size 2 times smaller than current level.
    5044 m_LevelCount is the maximum number of levels to use in the current object.
    5045 */
// Metadata implementation using a buddy-allocator tree over m_UsableSize
// (block size aligned down to a power of two); see the comment above.
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Unusable tail (beyond m_UsableSize) is counted as free for statistics.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root node is one big free range.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    // Smallest node size the tree will split down to, in bytes.
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    // Upper bound on tree depth (also sizes m_FreeList).
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled while walking the tree in Validate(),
    // compared against the cached counters afterwards.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree. The union's active member is selected by `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                // Intrusive links for the per-level free list (m_FreeList).
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                // Right child is leftChild->buddy.
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked list of free nodes per level; level 0 = whole usable size.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    // Maps a requested size to the deepest level whose node size still fits it.
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5182 
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of this block's suballocations; concrete type chosen in Init()
    // based on `algorithm`. Owned by this object.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory, reference-counted by `count`. ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    // Decrements the map reference count by `count`; unmaps when it reaches zero.
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Corruption-detection margins written/checked around a single allocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5251 
    5252 struct VmaPointerLess
    5253 {
    5254  bool operator()(const void* lhs, const void* rhs) const
    5255  {
    5256  return lhs < rhs;
    5257  }
    5258 };
    5259 
    5260 class VmaDefragmentator;
    5261 
    5262 /*
    5263 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5264 Vulkan memory type.
    5265 
    5266 Synchronized internally with a mutex.
    5267 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Preallocates m_MinBlockCount blocks. Call once after construction.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates from an existing block or creates a new one, up to m_MaxBlockCount.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates (or returns the existing) defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    // Runs defragmentation within the given budget; consumes from
    // maxBytesToMove / maxAllocationsToMove (passed by reference).
    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related state (the struct-level comment says this
    // vector is synchronized internally).
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    // Id handed to the next created block (see VmaDeviceMemoryBlock::GetId).
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5380 
// Internal representation of a custom pool (the VmaPool handle points here).
// Thin wrapper that owns a VmaBlockVector configured from VmaPoolCreateInfo.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // May be called only once: asserts the id has not been set yet.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5403 
// Performs defragmentation of one VmaBlockVector: collects candidate
// allocations via AddAllocation(), then moves them between blocks in
// Defragment() within the given byte/count budget.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    const VmaBlockVector* const... 
    5533 
    5534 #if VMA_RECORDING_ENABLED
    5535 
// Records VMA API calls into a file for offline replay/analysis.
// Compiled in only when VMA_RECORDING_ENABLED is nonzero.
class VmaRecorder
{
public:
    VmaRecorder();
    // Initializes recording according to settings. If useMutex is true,
    // concurrent recording calls are serialized via m_FileMutex.
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a snapshot of device and memory properties at the start of the trace.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per traced VMA operation. Each takes the current frame
    // index plus the parameters of the corresponding public entry point.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Identification of the calling thread and timestamp of a recorded call.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats pUserData for the trace: either as a string (when the allocation
    // uses string user data) or as a pointer value written into m_PtrStr.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17]; // Buffer used when user data is rendered as a pointer.
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File; // Output trace file.
    VMA_MUTEX m_FileMutex;
    // Timer calibration: frequency and counter value captured at Init.
    int64_t m_Freq;
    int64_t m_StartCounter;

    // Fills outParams with the current thread id and time.
    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5631 
    5632 #endif // #if VMA_RECORDING_ENABLED
    5633 
    5634 // Main allocator object.
// Main allocator object. This is the implementation behind the opaque
// VmaAllocator handle. It owns the default block vectors (one per memory
// type), the lists of dedicated allocations, and all user-created pools.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided allocation callbacks, or null to use defaults.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Effective granularity between buffers and images in the same block:
    // the device limit, raised to VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY if configured.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level wrappers over vkAllocateMemory/vkFreeMemory (apply heap size limits, callbacks).
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5831 
    5833 // Memory allocation #2 after VmaAllocator_T definition
    5834 
    5835 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5836 {
    5837  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5838 }
    5839 
    5840 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5841 {
    5842  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5843 }
    5844 
    5845 template<typename T>
    5846 static T* VmaAllocate(VmaAllocator hAllocator)
    5847 {
    5848  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5849 }
    5850 
    5851 template<typename T>
    5852 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5853 {
    5854  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5855 }
    5856 
    5857 template<typename T>
    5858 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5859 {
    5860  if(ptr != VMA_NULL)
    5861  {
    5862  ptr->~T();
    5863  VmaFree(hAllocator, ptr);
    5864  }
    5865 }
    5866 
    5867 template<typename T>
    5868 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5869 {
    5870  if(ptr != VMA_NULL)
    5871  {
    5872  for(size_t i = count; i--; )
    5873  ptr[i].~T();
    5874  VmaFree(hAllocator, ptr);
    5875  }
    5876 }
    5877 
    5879 // VmaStringBuilder
    5880 
    5881 #if VMA_STATS_STRING_ENABLED
    5882 
// Simple append-only string builder backed by a VmaVector<char>.
// The buffer is NOT null-terminated; use GetLength()/GetData() together.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5900 
    5901 void VmaStringBuilder::Add(const char* pStr)
    5902 {
    5903  const size_t strLen = strlen(pStr);
    5904  if(strLen > 0)
    5905  {
    5906  const size_t oldCount = m_Data.size();
    5907  m_Data.resize(oldCount + strLen);
    5908  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5909  }
    5910 }
    5911 
    5912 void VmaStringBuilder::AddNumber(uint32_t num)
    5913 {
    5914  char buf[11];
    5915  VmaUint32ToStr(buf, sizeof(buf), num);
    5916  Add(buf);
    5917 }
    5918 
    5919 void VmaStringBuilder::AddNumber(uint64_t num)
    5920 {
    5921  char buf[21];
    5922  VmaUint64ToStr(buf, sizeof(buf), num);
    5923  Add(buf);
    5924 }
    5925 
    5926 void VmaStringBuilder::AddPointer(const void* ptr)
    5927 {
    5928  char buf[21];
    5929  VmaPtrToStr(buf, sizeof(buf), ptr);
    5930  Add(buf);
    5931 }
    5932 
    5933 #endif // #if VMA_STATS_STRING_ENABLED
    5934 
    5936 // VmaJsonWriter
    5937 
    5938 #if VMA_STATS_STRING_ENABLED
    5939 
// Streaming JSON writer over a VmaStringBuilder. Maintains a stack of open
// objects/arrays so it can insert commas, colons, and indentation correctly.
// Usage errors (unbalanced Begin/End, non-string object keys) are caught by VMA_ASSERT.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Begin/End pairs for nested collections. singleLine suppresses
    // newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Complete string value in one call.
    void WriteString(const char* pStr);
    // Piecewise string value: BeginString, any number of ContinueString*, EndString.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently-open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount; // Values written so far (object keys count as values).
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString; // True between BeginString and EndString.

    // Emits the separator required before the next value at the current nesting level.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    5988 
// Indentation unit emitted once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
    5990 
// Constructs a writer that appends to sb; pAllocationCallbacks is used only
// for the internal collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    5997 
    5998 VmaJsonWriter::~VmaJsonWriter()
    5999 {
    6000  VMA_ASSERT(!m_InsideString);
    6001  VMA_ASSERT(m_Stack.empty());
    6002 }
    6003 
    6004 void VmaJsonWriter::BeginObject(bool singleLine)
    6005 {
    6006  VMA_ASSERT(!m_InsideString);
    6007 
    6008  BeginValue(false);
    6009  m_SB.Add('{');
    6010 
    6011  StackItem item;
    6012  item.type = COLLECTION_TYPE_OBJECT;
    6013  item.valueCount = 0;
    6014  item.singleLineMode = singleLine;
    6015  m_Stack.push_back(item);
    6016 }
    6017 
    6018 void VmaJsonWriter::EndObject()
    6019 {
    6020  VMA_ASSERT(!m_InsideString);
    6021 
    6022  WriteIndent(true);
    6023  m_SB.Add('}');
    6024 
    6025  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6026  m_Stack.pop_back();
    6027 }
    6028 
    6029 void VmaJsonWriter::BeginArray(bool singleLine)
    6030 {
    6031  VMA_ASSERT(!m_InsideString);
    6032 
    6033  BeginValue(false);
    6034  m_SB.Add('[');
    6035 
    6036  StackItem item;
    6037  item.type = COLLECTION_TYPE_ARRAY;
    6038  item.valueCount = 0;
    6039  item.singleLineMode = singleLine;
    6040  m_Stack.push_back(item);
    6041 }
    6042 
    6043 void VmaJsonWriter::EndArray()
    6044 {
    6045  VMA_ASSERT(!m_InsideString);
    6046 
    6047  WriteIndent(true);
    6048  m_SB.Add(']');
    6049 
    6050  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6051  m_Stack.pop_back();
    6052 }
    6053 
    6054 void VmaJsonWriter::WriteString(const char* pStr)
    6055 {
    6056  BeginString(pStr);
    6057  EndString();
    6058 }
    6059 
    6060 void VmaJsonWriter::BeginString(const char* pStr)
    6061 {
    6062  VMA_ASSERT(!m_InsideString);
    6063 
    6064  BeginValue(true);
    6065  m_SB.Add('"');
    6066  m_InsideString = true;
    6067  if(pStr != VMA_NULL && pStr[0] != '\0')
    6068  {
    6069  ContinueString(pStr);
    6070  }
    6071 }
    6072 
    6073 void VmaJsonWriter::ContinueString(const char* pStr)
    6074 {
    6075  VMA_ASSERT(m_InsideString);
    6076 
    6077  const size_t strLen = strlen(pStr);
    6078  for(size_t i = 0; i < strLen; ++i)
    6079  {
    6080  char ch = pStr[i];
    6081  if(ch == '\\')
    6082  {
    6083  m_SB.Add("\\\\");
    6084  }
    6085  else if(ch == '"')
    6086  {
    6087  m_SB.Add("\\\"");
    6088  }
    6089  else if(ch >= 32)
    6090  {
    6091  m_SB.Add(ch);
    6092  }
    6093  else switch(ch)
    6094  {
    6095  case '\b':
    6096  m_SB.Add("\\b");
    6097  break;
    6098  case '\f':
    6099  m_SB.Add("\\f");
    6100  break;
    6101  case '\n':
    6102  m_SB.Add("\\n");
    6103  break;
    6104  case '\r':
    6105  m_SB.Add("\\r");
    6106  break;
    6107  case '\t':
    6108  m_SB.Add("\\t");
    6109  break;
    6110  default:
    6111  VMA_ASSERT(0 && "Character not currently supported.");
    6112  break;
    6113  }
    6114  }
    6115 }
    6116 
// Appends a 32-bit number (decimal) to the string value being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6122 
// Appends a 64-bit number (decimal) to the string value being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6128 
// Appends a formatted pointer value to the string value being written.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6134 
    6135 void VmaJsonWriter::EndString(const char* pStr)
    6136 {
    6137  VMA_ASSERT(m_InsideString);
    6138  if(pStr != VMA_NULL && pStr[0] != '\0')
    6139  {
    6140  ContinueString(pStr);
    6141  }
    6142  m_SB.Add('"');
    6143  m_InsideString = false;
    6144 }
    6145 
// Writes a complete 32-bit number value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6152 
// Writes a complete 64-bit number value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6159 
    6160 void VmaJsonWriter::WriteBool(bool b)
    6161 {
    6162  VMA_ASSERT(!m_InsideString);
    6163  BeginValue(false);
    6164  m_SB.Add(b ? "true" : "false");
    6165 }
    6166 
// Writes a complete null value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6173 
    6174 void VmaJsonWriter::BeginValue(bool isString)
    6175 {
    6176  if(!m_Stack.empty())
    6177  {
    6178  StackItem& currItem = m_Stack.back();
    6179  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6180  currItem.valueCount % 2 == 0)
    6181  {
    6182  VMA_ASSERT(isString);
    6183  }
    6184 
    6185  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6186  currItem.valueCount % 2 != 0)
    6187  {
    6188  m_SB.Add(": ");
    6189  }
    6190  else if(currItem.valueCount > 0)
    6191  {
    6192  m_SB.Add(", ");
    6193  WriteIndent();
    6194  }
    6195  else
    6196  {
    6197  WriteIndent();
    6198  }
    6199  ++currItem.valueCount;
    6200  }
    6201 }
    6202 
    6203 void VmaJsonWriter::WriteIndent(bool oneLess)
    6204 {
    6205  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6206  {
    6207  m_SB.AddNewLine();
    6208 
    6209  size_t count = m_Stack.size();
    6210  if(count > 0 && oneLess)
    6211  {
    6212  --count;
    6213  }
    6214  for(size_t i = 0; i < count; ++i)
    6215  {
    6216  m_SB.Add(INDENT);
    6217  }
    6218  }
    6219 }
    6220 
    6221 #endif // #if VMA_STATS_STRING_ENABLED
    6222 
    6224 
    6225 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6226 {
    6227  if(IsUserDataString())
    6228  {
    6229  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6230 
    6231  FreeUserDataString(hAllocator);
    6232 
    6233  if(pUserData != VMA_NULL)
    6234  {
    6235  const char* const newStrSrc = (char*)pUserData;
    6236  const size_t newStrLen = strlen(newStrSrc);
    6237  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6238  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6239  m_pUserData = newStrDst;
    6240  }
    6241  }
    6242  else
    6243  {
    6244  m_pUserData = pUserData;
    6245  }
    6246 }
    6247 
// Re-points this block allocation at (block, offset), e.g. after defragmentation
// has moved its contents. The allocation must already be of block type.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation holds one additional mapping reference.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6269 
    6270 VkDeviceSize VmaAllocation_T::GetOffset() const
    6271 {
    6272  switch(m_Type)
    6273  {
    6274  case ALLOCATION_TYPE_BLOCK:
    6275  return m_BlockAllocation.m_Offset;
    6276  case ALLOCATION_TYPE_DEDICATED:
    6277  return 0;
    6278  default:
    6279  VMA_ASSERT(0);
    6280  return 0;
    6281  }
    6282 }
    6283 
    6284 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6285 {
    6286  switch(m_Type)
    6287  {
    6288  case ALLOCATION_TYPE_BLOCK:
    6289  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6290  case ALLOCATION_TYPE_DEDICATED:
    6291  return m_DedicatedAllocation.m_hMemory;
    6292  default:
    6293  VMA_ASSERT(0);
    6294  return VK_NULL_HANDLE;
    6295  }
    6296 }
    6297 
    6298 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6299 {
    6300  switch(m_Type)
    6301  {
    6302  case ALLOCATION_TYPE_BLOCK:
    6303  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6304  case ALLOCATION_TYPE_DEDICATED:
    6305  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6306  default:
    6307  VMA_ASSERT(0);
    6308  return UINT32_MAX;
    6309  }
    6310 }
    6311 
    6312 void* VmaAllocation_T::GetMappedData() const
    6313 {
    6314  switch(m_Type)
    6315  {
    6316  case ALLOCATION_TYPE_BLOCK:
    6317  if(m_MapCount != 0)
    6318  {
    6319  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6320  VMA_ASSERT(pBlockData != VMA_NULL);
    6321  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6322  }
    6323  else
    6324  {
    6325  return VMA_NULL;
    6326  }
    6327  break;
    6328  case ALLOCATION_TYPE_DEDICATED:
    6329  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6330  return m_DedicatedAllocation.m_pMappedData;
    6331  default:
    6332  VMA_ASSERT(0);
    6333  return VMA_NULL;
    6334  }
    6335 }
    6336 
    6337 bool VmaAllocation_T::CanBecomeLost() const
    6338 {
    6339  switch(m_Type)
    6340  {
    6341  case ALLOCATION_TYPE_BLOCK:
    6342  return m_BlockAllocation.m_CanBecomeLost;
    6343  case ALLOCATION_TYPE_DEDICATED:
    6344  return false;
    6345  default:
    6346  VMA_ASSERT(0);
    6347  return false;
    6348  }
    6349 }
    6350 
// Returns the custom pool this block allocation belongs to (block type only).
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6356 
// Attempts to mark this allocation as lost. Returns true on success; returns
// false if the allocation was used too recently (within frameInUseCount frames)
// or is already lost. Lock-free: retries the compare-exchange until it settles.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU - cannot be lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: localLastUseFrameIndex was refreshed; re-evaluate.
        }
    }
}
    6388 
    6389 #if VMA_STATS_STRING_ENABLED
    6390 
    6391 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value when printing statistics.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6400 
// Writes this allocation's parameters as key/value pairs into an already-open
// JSON object. Key order is part of the emitted format - do not reorder.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // String user data is written verbatim.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque user data is written as a pointer value inside a string.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Only present when a buffer/image usage was recorded for this allocation.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6436 
    6437 #endif
    6438 
    6439 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6440 {
    6441  VMA_ASSERT(IsUserDataString());
    6442  if(m_pUserData != VMA_NULL)
    6443  {
    6444  char* const oldStr = (char*)m_pUserData;
    6445  const size_t oldStrLen = strlen(oldStr);
    6446  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6447  m_pUserData = VMA_NULL;
    6448  }
    6449 }
    6450 
    6451 void VmaAllocation_T::BlockAllocMap()
    6452 {
    6453  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6454 
    6455  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6456  {
    6457  ++m_MapCount;
    6458  }
    6459  else
    6460  {
    6461  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6462  }
    6463 }
    6464 
    6465 void VmaAllocation_T::BlockAllocUnmap()
    6466 {
    6467  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6468 
    6469  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6470  {
    6471  --m_MapCount;
    6472  }
    6473  else
    6474  {
    6475  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6476  }
    6477 }
    6478 
    6479 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6480 {
    6481  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6482 
    6483  if(m_MapCount != 0)
    6484  {
    6485  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6486  {
    6487  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6488  *ppData = m_DedicatedAllocation.m_pMappedData;
    6489  ++m_MapCount;
    6490  return VK_SUCCESS;
    6491  }
    6492  else
    6493  {
    6494  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6495  return VK_ERROR_MEMORY_MAP_FAILED;
    6496  }
    6497  }
    6498  else
    6499  {
    6500  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6501  hAllocator->m_hDevice,
    6502  m_DedicatedAllocation.m_hMemory,
    6503  0, // offset
    6504  VK_WHOLE_SIZE,
    6505  0, // flags
    6506  ppData);
    6507  if(result == VK_SUCCESS)
    6508  {
    6509  m_DedicatedAllocation.m_pMappedData = *ppData;
    6510  m_MapCount = 1;
    6511  }
    6512  return result;
    6513  }
    6514 }
    6515 
    6516 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6517 {
    6518  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6519 
    6520  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6521  {
    6522  --m_MapCount;
    6523  if(m_MapCount == 0)
    6524  {
    6525  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6526  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6527  hAllocator->m_hDevice,
    6528  m_DedicatedAllocation.m_hMemory);
    6529  }
    6530  }
    6531  else
    6532  {
    6533  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6534  }
    6535 }
    6536 
    6537 #if VMA_STATS_STRING_ENABLED
    6538 
// Writes a VmaStatInfo structure as a JSON object. Key names and their order
// are part of the emitted statistics format - do not reorder.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    // Min/Avg/Max are only meaningful with more than one sample.
    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6586 
    6587 #endif // #if VMA_STATS_STRING_ENABLED
    6588 
    6589 struct VmaSuballocationItemSizeLess
    6590 {
    6591  bool operator()(
    6592  const VmaSuballocationList::iterator lhs,
    6593  const VmaSuballocationList::iterator rhs) const
    6594  {
    6595  return lhs->size < rhs->size;
    6596  }
    6597  bool operator()(
    6598  const VmaSuballocationList::iterator lhs,
    6599  VkDeviceSize rhsSize) const
    6600  {
    6601  return lhs->size < rhsSize;
    6602  }
    6603 };
    6604 
    6605 
    6607 // class VmaBlockMetadata
    6608 
// Base-class constructor: the block size starts at 0 and is assigned later
// via Init(). The allocator's allocation callbacks are cached so derived
// metadata classes can allocate internal containers consistently with the
// owning VmaAllocator.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6614 
    6615 #if VMA_STATS_STRING_ENABLED
    6616 
    6617 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    6618  VkDeviceSize unusedBytes,
    6619  size_t allocationCount,
    6620  size_t unusedRangeCount) const
    6621 {
    6622  json.BeginObject();
    6623 
    6624  json.WriteString("TotalBytes");
    6625  json.WriteNumber(GetSize());
    6626 
    6627  json.WriteString("UnusedBytes");
    6628  json.WriteNumber(unusedBytes);
    6629 
    6630  json.WriteString("Allocations");
    6631  json.WriteNumber((uint64_t)allocationCount);
    6632 
    6633  json.WriteString("UnusedRanges");
    6634  json.WriteNumber((uint64_t)unusedRangeCount);
    6635 
    6636  json.WriteString("Suballocations");
    6637  json.BeginArray();
    6638 }
    6639 
    6640 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    6641  VkDeviceSize offset,
    6642  VmaAllocation hAllocation) const
    6643 {
    6644  json.BeginObject(true);
    6645 
    6646  json.WriteString("Offset");
    6647  json.WriteNumber(offset);
    6648 
    6649  hAllocation->PrintParameters(json);
    6650 
    6651  json.EndObject();
    6652 }
    6653 
    6654 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    6655  VkDeviceSize offset,
    6656  VkDeviceSize size) const
    6657 {
    6658  json.BeginObject(true);
    6659 
    6660  json.WriteString("Offset");
    6661  json.WriteNumber(offset);
    6662 
    6663  json.WriteString("Type");
    6664  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    6665 
    6666  json.WriteString("Size");
    6667  json.WriteNumber(size);
    6668 
    6669  json.EndObject();
    6670 }
    6671 
// Closes the "Suballocations" array and the per-block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6677 
    6678 #endif // #if VMA_STATS_STRING_ENABLED
    6679 
    6681 // class VmaBlockMetadata_Generic
    6682 
// Constructs empty generic metadata. Both internal containers route their
// memory through VmaStlAllocator so container allocations honor the
// VkAllocationCallbacks of the owning allocator.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6691 
// Nothing to release explicitly: the member containers clean up themselves.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6695 
    6696 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6697 {
    6698  VmaBlockMetadata::Init(size);
    6699 
    6700  m_FreeCount = 1;
    6701  m_SumFreeSize = size;
    6702 
    6703  VmaSuballocation suballoc = {};
    6704  suballoc.offset = 0;
    6705  suballoc.size = size;
    6706  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6707  suballoc.hAllocation = VK_NULL_HANDLE;
    6708 
    6709  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6710  m_Suballocations.push_back(suballoc);
    6711  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6712  --suballocItem;
    6713  m_FreeSuballocationsBySize.push_back(suballocItem);
    6714 }
    6715 
// Full consistency check of this block's metadata. Returns true when
// everything is consistent; VMA_VALIDATE returns false from this function on
// the first violated condition. Intended for VMA_HEAVY_ASSERT(Validate()).
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A suballocation is free exactly when it carries no allocation handle.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6797 
    6798 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6799 {
    6800  if(!m_FreeSuballocationsBySize.empty())
    6801  {
    6802  return m_FreeSuballocationsBySize.back()->size;
    6803  }
    6804  else
    6805  {
    6806  return 0;
    6807  }
    6808 }
    6809 
    6810 bool VmaBlockMetadata_Generic::IsEmpty() const
    6811 {
    6812  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6813 }
    6814 
    6815 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6816 {
    6817  outInfo.blockCount = 1;
    6818 
    6819  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6820  outInfo.allocationCount = rangeCount - m_FreeCount;
    6821  outInfo.unusedRangeCount = m_FreeCount;
    6822 
    6823  outInfo.unusedBytes = m_SumFreeSize;
    6824  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6825 
    6826  outInfo.allocationSizeMin = UINT64_MAX;
    6827  outInfo.allocationSizeMax = 0;
    6828  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6829  outInfo.unusedRangeSizeMax = 0;
    6830 
    6831  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6832  suballocItem != m_Suballocations.cend();
    6833  ++suballocItem)
    6834  {
    6835  const VmaSuballocation& suballoc = *suballocItem;
    6836  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6837  {
    6838  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6839  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6840  }
    6841  else
    6842  {
    6843  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6844  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6845  }
    6846  }
    6847 }
    6848 
    6849 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6850 {
    6851  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6852 
    6853  inoutStats.size += GetSize();
    6854  inoutStats.unusedSize += m_SumFreeSize;
    6855  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6856  inoutStats.unusedRangeCount += m_FreeCount;
    6857  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6858 }
    6859 
    6860 #if VMA_STATS_STRING_ENABLED
    6861 
    6862 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6863 {
    6864  PrintDetailedMap_Begin(json,
    6865  m_SumFreeSize, // unusedBytes
    6866  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6867  m_FreeCount); // unusedRangeCount
    6868 
    6869  size_t i = 0;
    6870  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6871  suballocItem != m_Suballocations.cend();
    6872  ++suballocItem, ++i)
    6873  {
    6874  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6875  {
    6876  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6877  }
    6878  else
    6879  {
    6880  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6881  }
    6882  }
    6883 
    6884  PrintDetailedMap_End(json);
    6885 }
    6886 
    6887 #endif // #if VMA_STATS_STRING_ENABLED
    6888 
    6889 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6890  uint32_t currentFrameIndex,
    6891  uint32_t frameInUseCount,
    6892  VkDeviceSize bufferImageGranularity,
    6893  VkDeviceSize allocSize,
    6894  VkDeviceSize allocAlignment,
    6895  bool upperAddress,
    6896  VmaSuballocationType allocType,
    6897  bool canMakeOtherLost,
    6898  uint32_t strategy,
    6899  VmaAllocationRequest* pAllocationRequest)
    6900 {
    6901  VMA_ASSERT(allocSize > 0);
    6902  VMA_ASSERT(!upperAddress);
    6903  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6904  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6905  VMA_HEAVY_ASSERT(Validate());
    6906 
    6907  // There is not enough total free space in this block to fullfill the request: Early return.
    6908  if(canMakeOtherLost == false &&
    6909  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6910  {
    6911  return false;
    6912  }
    6913 
    6914  // New algorithm, efficiently searching freeSuballocationsBySize.
    6915  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6916  if(freeSuballocCount > 0)
    6917  {
    6919  {
    6920  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6921  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6922  m_FreeSuballocationsBySize.data(),
    6923  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6924  allocSize + 2 * VMA_DEBUG_MARGIN,
    6925  VmaSuballocationItemSizeLess());
    6926  size_t index = it - m_FreeSuballocationsBySize.data();
    6927  for(; index < freeSuballocCount; ++index)
    6928  {
    6929  if(CheckAllocation(
    6930  currentFrameIndex,
    6931  frameInUseCount,
    6932  bufferImageGranularity,
    6933  allocSize,
    6934  allocAlignment,
    6935  allocType,
    6936  m_FreeSuballocationsBySize[index],
    6937  false, // canMakeOtherLost
    6938  &pAllocationRequest->offset,
    6939  &pAllocationRequest->itemsToMakeLostCount,
    6940  &pAllocationRequest->sumFreeSize,
    6941  &pAllocationRequest->sumItemSize))
    6942  {
    6943  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6944  return true;
    6945  }
    6946  }
    6947  }
    6948  else // WORST_FIT, FIRST_FIT
    6949  {
    6950  // Search staring from biggest suballocations.
    6951  for(size_t index = freeSuballocCount; index--; )
    6952  {
    6953  if(CheckAllocation(
    6954  currentFrameIndex,
    6955  frameInUseCount,
    6956  bufferImageGranularity,
    6957  allocSize,
    6958  allocAlignment,
    6959  allocType,
    6960  m_FreeSuballocationsBySize[index],
    6961  false, // canMakeOtherLost
    6962  &pAllocationRequest->offset,
    6963  &pAllocationRequest->itemsToMakeLostCount,
    6964  &pAllocationRequest->sumFreeSize,
    6965  &pAllocationRequest->sumItemSize))
    6966  {
    6967  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6968  return true;
    6969  }
    6970  }
    6971  }
    6972  }
    6973 
    6974  if(canMakeOtherLost)
    6975  {
    6976  // Brute-force algorithm. TODO: Come up with something better.
    6977 
    6978  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6979  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6980 
    6981  VmaAllocationRequest tmpAllocRequest = {};
    6982  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    6983  suballocIt != m_Suballocations.end();
    6984  ++suballocIt)
    6985  {
    6986  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    6987  suballocIt->hAllocation->CanBecomeLost())
    6988  {
    6989  if(CheckAllocation(
    6990  currentFrameIndex,
    6991  frameInUseCount,
    6992  bufferImageGranularity,
    6993  allocSize,
    6994  allocAlignment,
    6995  allocType,
    6996  suballocIt,
    6997  canMakeOtherLost,
    6998  &tmpAllocRequest.offset,
    6999  &tmpAllocRequest.itemsToMakeLostCount,
    7000  &tmpAllocRequest.sumFreeSize,
    7001  &tmpAllocRequest.sumItemSize))
    7002  {
    7003  tmpAllocRequest.item = suballocIt;
    7004 
    7005  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7007  {
    7008  *pAllocationRequest = tmpAllocRequest;
    7009  }
    7010  }
    7011  }
    7012  }
    7013 
    7014  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7015  {
    7016  return true;
    7017  }
    7018  }
    7019 
    7020  return false;
    7021 }
    7022 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// starting at pAllocationRequest->item, so the request can then be committed.
// Returns false if any of them could not be made lost, in which case the
// request must be abandoned.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip a free suballocation to reach the next used one.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; keep the returned iterator.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7054 
    7055 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7056 {
    7057  uint32_t lostAllocationCount = 0;
    7058  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7059  it != m_Suballocations.end();
    7060  ++it)
    7061  {
    7062  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7063  it->hAllocation->CanBecomeLost() &&
    7064  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7065  {
    7066  it = FreeSuballocation(it);
    7067  ++lostAllocationCount;
    7068  }
    7069  }
    7070  return lostAllocationCount;
    7071 }
    7072 
    7073 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7074 {
    7075  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7076  it != m_Suballocations.end();
    7077  ++it)
    7078  {
    7079  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7080  {
    7081  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7082  {
    7083  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7084  return VK_ERROR_VALIDATION_FAILED_EXT;
    7085  }
    7086  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7087  {
    7088  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7089  return VK_ERROR_VALIDATION_FAILED_EXT;
    7090  }
    7091  }
    7092  }
    7093 
    7094  return VK_SUCCESS;
    7095 }
    7096 
// Commits a previously computed allocation request: converts the chosen free
// suballocation into a used one and inserts new free suballocations for any
// leftover space after (paddingEnd) and before (paddingBegin) the allocation.
// The insertion order (end padding first, then begin padding) keeps
// request.item valid while both inserts happen around it.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free range was consumed; each surviving padding re-adds one.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7162 
    7163 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7164 {
    7165  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7166  suballocItem != m_Suballocations.end();
    7167  ++suballocItem)
    7168  {
    7169  VmaSuballocation& suballoc = *suballocItem;
    7170  if(suballoc.hAllocation == allocation)
    7171  {
    7172  FreeSuballocation(suballocItem);
    7173  VMA_HEAVY_ASSERT(Validate());
    7174  return;
    7175  }
    7176  }
    7177  VMA_ASSERT(0 && "Not found!");
    7178 }
    7179 
    7180 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7181 {
    7182  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7183  suballocItem != m_Suballocations.end();
    7184  ++suballocItem)
    7185  {
    7186  VmaSuballocation& suballoc = *suballocItem;
    7187  if(suballoc.offset == offset)
    7188  {
    7189  FreeSuballocation(suballocItem);
    7190  return;
    7191  }
    7192  }
    7193  VMA_ASSERT(0 && "Not found!");
    7194 }
    7195 
    7196 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7197 {
    7198  VkDeviceSize lastSize = 0;
    7199  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7200  {
    7201  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7202 
    7203  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7204  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7205  VMA_VALIDATE(it->size >= lastSize);
    7206  lastSize = it->size;
    7207  }
    7208  return true;
    7209 }
    7210 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at suballocItem, honoring VMA_DEBUG_MARGIN and Vulkan's
// bufferImageGranularity. On success writes the final aligned offset to
// *pOffset. When canMakeOtherLost is true it may span multiple suballocations,
// counting in *itemsToMakeLostCount the lost-capable allocations that would
// have to be sacrificed, and accumulating free/used byte sums used by the
// caller for cost ranking. Returns true when placement is possible.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting suballocation may be used: it then must itself be
        // lost-capable and old enough, and counts toward the sacrificed bytes.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple case: the allocation must fit entirely inside this one free
        // suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7484 
    7485 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7486 {
    7487  VMA_ASSERT(item != m_Suballocations.end());
    7488  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7489 
    7490  VmaSuballocationList::iterator nextItem = item;
    7491  ++nextItem;
    7492  VMA_ASSERT(nextItem != m_Suballocations.end());
    7493  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7494 
    7495  item->size += nextItem->size;
    7496  --m_FreeCount;
    7497  m_Suballocations.erase(nextItem);
    7498 }
    7499 
// Marks the suballocation at `suballocItem` as free and coalesces it with a
// free predecessor and/or successor if present, keeping m_FreeCount,
// m_SumFreeSize and the sorted m_FreeSuballocationsBySize vector consistent.
// Returns an iterator to the resulting (possibly enlarged) free suballocation.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals. (The item itself is not in m_FreeSuballocationsBySize yet:
    // it was allocated until now, so there is nothing to unregister for it.)
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Order matters: a free neighbor must be unregistered from the by-size
    // vector BEFORE MergeFreeWithNext() changes sizes / erases list nodes,
    // otherwise the sorted vector would hold a stale or dangling iterator.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // Merging into the predecessor: it absorbs suballocItem, so the
        // predecessor is re-registered with its new, larger size and returned.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7551 
    7552 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7553 {
    7554  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7555  VMA_ASSERT(item->size > 0);
    7556 
    7557  // You may want to enable this validation at the beginning or at the end of
    7558  // this function, depending on what do you want to check.
    7559  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7560 
    7561  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7562  {
    7563  if(m_FreeSuballocationsBySize.empty())
    7564  {
    7565  m_FreeSuballocationsBySize.push_back(item);
    7566  }
    7567  else
    7568  {
    7569  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7570  }
    7571  }
    7572 
    7573  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7574 }
    7575 
    7576 
// Removes the given free suballocation from m_FreeSuballocationsBySize.
// Counterpart of RegisterFreeSuballocation(); must be called before the item's
// size changes or its list node is erased, while the stored iterator is valid.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    // Only ranges >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER were ever
    // registered, so smaller ones are simply not in the vector.
    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary-search the first entry not smaller than item's size, then scan
        // forward through the run of equal-sized entries for this exact iterator.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Once sizes stop matching we have left the run of candidates:
            // the item should have been found by now.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7609 
    7611 // class VmaBlockMetadata_Linear
    7612 
// Constructs empty linear metadata. m_Suballocations0/1 are the two
// suballocation vectors; m_1stVectorIndex presumably selects which of them
// currently acts as the "1st" vector (see AccessSuballocations1st/2nd — TODO
// confirm, accessors not visible here). Starts with the second vector unused.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7625 
// Nothing to release explicitly: the suballocation vectors clean up through
// their VmaStlAllocator-based storage.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7629 
// Initializes metadata for a block of the given size.
// The entire block starts out free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7635 
// Checks internal consistency of this linear metadata object: vector-mode
// invariants, null-item counters, that offsets strictly increase with at least
// VMA_DEBUG_MARGIN between neighboring regions, and that m_SumFreeSize equals
// block size minus the sum of used bytes.
// Returns true on success; VMA_VALIDATE reports and returns false on the
// first failed condition.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is empty exactly when no second-vector mode is active.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // Ring-buffer mode with a non-empty 2nd vector requires a non-empty 1st vector.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Minimum admissible offset for the next region; starts after the margin.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // Ring buffer: the 2nd vector (wrapped-around part) occupies the lowest offsets.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // FREE type and null handle must agree.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // The allocation object must agree with the recorded placement.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading null items of the 1st vector must all be free placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): trivially true here since i starts at
        // m_1stNullItemsBeginCount - presumably a leftover from an older loop
        // bound; confirm before removing.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Upper stack is stored back-to-front, so iterate in reverse to walk
        // increasing offsets.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7762 
    7763 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7764 {
    7765  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7766  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7767 }
    7768 
    7769 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7770 {
    7771  const VkDeviceSize size = GetSize();
    7772 
    7773  /*
    7774  We don't consider gaps inside allocation vectors with freed allocations because
    7775  they are not suitable for reuse in linear allocator. We consider only space that
    7776  is available for new allocations.
    7777  */
    7778  if(IsEmpty())
    7779  {
    7780  return size;
    7781  }
    7782 
    7783  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7784 
    7785  switch(m_2ndVectorMode)
    7786  {
    7787  case SECOND_VECTOR_EMPTY:
    7788  /*
    7789  Available space is after end of 1st, as well as before beginning of 1st (which
    7790  whould make it a ring buffer).
    7791  */
    7792  {
    7793  const size_t suballocations1stCount = suballocations1st.size();
    7794  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    7795  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    7796  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    7797  return VMA_MAX(
    7798  firstSuballoc.offset,
    7799  size - (lastSuballoc.offset + lastSuballoc.size));
    7800  }
    7801  break;
    7802 
    7803  case SECOND_VECTOR_RING_BUFFER:
    7804  /*
    7805  Available space is only between end of 2nd and beginning of 1st.
    7806  */
    7807  {
    7808  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7809  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    7810  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    7811  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    7812  }
    7813  break;
    7814 
    7815  case SECOND_VECTOR_DOUBLE_STACK:
    7816  /*
    7817  Available space is only between end of 1st and top of 2nd.
    7818  */
    7819  {
    7820  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7821  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    7822  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    7823  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    7824  }
    7825  break;
    7826 
    7827  default:
    7828  VMA_ASSERT(0);
    7829  return 0;
    7830  }
    7831 }
    7832 
    7833 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7834 {
    7835  const VkDeviceSize size = GetSize();
    7836  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7837  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7838  const size_t suballoc1stCount = suballocations1st.size();
    7839  const size_t suballoc2ndCount = suballocations2nd.size();
    7840 
    7841  outInfo.blockCount = 1;
    7842  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7843  outInfo.unusedRangeCount = 0;
    7844  outInfo.usedBytes = 0;
    7845  outInfo.allocationSizeMin = UINT64_MAX;
    7846  outInfo.allocationSizeMax = 0;
    7847  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7848  outInfo.unusedRangeSizeMax = 0;
    7849 
    7850  VkDeviceSize lastOffset = 0;
    7851 
    7852  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7853  {
    7854  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7855  size_t nextAlloc2ndIndex = 0;
    7856  while(lastOffset < freeSpace2ndTo1stEnd)
    7857  {
    7858  // Find next non-null allocation or move nextAllocIndex to the end.
    7859  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7860  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7861  {
    7862  ++nextAlloc2ndIndex;
    7863  }
    7864 
    7865  // Found non-null allocation.
    7866  if(nextAlloc2ndIndex < suballoc2ndCount)
    7867  {
    7868  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7869 
    7870  // 1. Process free space before this allocation.
    7871  if(lastOffset < suballoc.offset)
    7872  {
    7873  // There is free space from lastOffset to suballoc.offset.
    7874  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7875  ++outInfo.unusedRangeCount;
    7876  outInfo.unusedBytes += unusedRangeSize;
    7877  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7878  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7879  }
    7880 
    7881  // 2. Process this allocation.
    7882  // There is allocation with suballoc.offset, suballoc.size.
    7883  outInfo.usedBytes += suballoc.size;
    7884  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7885  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7886 
    7887  // 3. Prepare for next iteration.
    7888  lastOffset = suballoc.offset + suballoc.size;
    7889  ++nextAlloc2ndIndex;
    7890  }
    7891  // We are at the end.
    7892  else
    7893  {
    7894  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7895  if(lastOffset < freeSpace2ndTo1stEnd)
    7896  {
    7897  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7898  ++outInfo.unusedRangeCount;
    7899  outInfo.unusedBytes += unusedRangeSize;
    7900  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7901  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7902  }
    7903 
    7904  // End of loop.
    7905  lastOffset = freeSpace2ndTo1stEnd;
    7906  }
    7907  }
    7908  }
    7909 
    7910  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7911  const VkDeviceSize freeSpace1stTo2ndEnd =
    7912  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7913  while(lastOffset < freeSpace1stTo2ndEnd)
    7914  {
    7915  // Find next non-null allocation or move nextAllocIndex to the end.
    7916  while(nextAlloc1stIndex < suballoc1stCount &&
    7917  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7918  {
    7919  ++nextAlloc1stIndex;
    7920  }
    7921 
    7922  // Found non-null allocation.
    7923  if(nextAlloc1stIndex < suballoc1stCount)
    7924  {
    7925  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7926 
    7927  // 1. Process free space before this allocation.
    7928  if(lastOffset < suballoc.offset)
    7929  {
    7930  // There is free space from lastOffset to suballoc.offset.
    7931  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7932  ++outInfo.unusedRangeCount;
    7933  outInfo.unusedBytes += unusedRangeSize;
    7934  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7935  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7936  }
    7937 
    7938  // 2. Process this allocation.
    7939  // There is allocation with suballoc.offset, suballoc.size.
    7940  outInfo.usedBytes += suballoc.size;
    7941  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7942  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7943 
    7944  // 3. Prepare for next iteration.
    7945  lastOffset = suballoc.offset + suballoc.size;
    7946  ++nextAlloc1stIndex;
    7947  }
    7948  // We are at the end.
    7949  else
    7950  {
    7951  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7952  if(lastOffset < freeSpace1stTo2ndEnd)
    7953  {
    7954  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7955  ++outInfo.unusedRangeCount;
    7956  outInfo.unusedBytes += unusedRangeSize;
    7957  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7958  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7959  }
    7960 
    7961  // End of loop.
    7962  lastOffset = freeSpace1stTo2ndEnd;
    7963  }
    7964  }
    7965 
    7966  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7967  {
    7968  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7969  while(lastOffset < size)
    7970  {
    7971  // Find next non-null allocation or move nextAllocIndex to the end.
    7972  while(nextAlloc2ndIndex != SIZE_MAX &&
    7973  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7974  {
    7975  --nextAlloc2ndIndex;
    7976  }
    7977 
    7978  // Found non-null allocation.
    7979  if(nextAlloc2ndIndex != SIZE_MAX)
    7980  {
    7981  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7982 
    7983  // 1. Process free space before this allocation.
    7984  if(lastOffset < suballoc.offset)
    7985  {
    7986  // There is free space from lastOffset to suballoc.offset.
    7987  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7988  ++outInfo.unusedRangeCount;
    7989  outInfo.unusedBytes += unusedRangeSize;
    7990  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7991  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7992  }
    7993 
    7994  // 2. Process this allocation.
    7995  // There is allocation with suballoc.offset, suballoc.size.
    7996  outInfo.usedBytes += suballoc.size;
    7997  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7998  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7999 
    8000  // 3. Prepare for next iteration.
    8001  lastOffset = suballoc.offset + suballoc.size;
    8002  --nextAlloc2ndIndex;
    8003  }
    8004  // We are at the end.
    8005  else
    8006  {
    8007  // There is free space from lastOffset to size.
    8008  if(lastOffset < size)
    8009  {
    8010  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8011  ++outInfo.unusedRangeCount;
    8012  outInfo.unusedBytes += unusedRangeSize;
    8013  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8014  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8015  }
    8016 
    8017  // End of loop.
    8018  lastOffset = size;
    8019  }
    8020  }
    8021  }
    8022 
    8023  outInfo.unusedBytes = size - outInfo.usedBytes;
    8024 }
    8025 
    8026 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8027 {
    8028  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8029  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8030  const VkDeviceSize size = GetSize();
    8031  const size_t suballoc1stCount = suballocations1st.size();
    8032  const size_t suballoc2ndCount = suballocations2nd.size();
    8033 
    8034  inoutStats.size += size;
    8035 
    8036  VkDeviceSize lastOffset = 0;
    8037 
    8038  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8039  {
    8040  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8041  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8042  while(lastOffset < freeSpace2ndTo1stEnd)
    8043  {
    8044  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8045  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8046  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8047  {
    8048  ++nextAlloc2ndIndex;
    8049  }
    8050 
    8051  // Found non-null allocation.
    8052  if(nextAlloc2ndIndex < suballoc2ndCount)
    8053  {
    8054  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8055 
    8056  // 1. Process free space before this allocation.
    8057  if(lastOffset < suballoc.offset)
    8058  {
    8059  // There is free space from lastOffset to suballoc.offset.
    8060  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8061  inoutStats.unusedSize += unusedRangeSize;
    8062  ++inoutStats.unusedRangeCount;
    8063  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8064  }
    8065 
    8066  // 2. Process this allocation.
    8067  // There is allocation with suballoc.offset, suballoc.size.
    8068  ++inoutStats.allocationCount;
    8069 
    8070  // 3. Prepare for next iteration.
    8071  lastOffset = suballoc.offset + suballoc.size;
    8072  ++nextAlloc2ndIndex;
    8073  }
    8074  // We are at the end.
    8075  else
    8076  {
    8077  if(lastOffset < freeSpace2ndTo1stEnd)
    8078  {
    8079  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8080  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8081  inoutStats.unusedSize += unusedRangeSize;
    8082  ++inoutStats.unusedRangeCount;
    8083  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8084  }
    8085 
    8086  // End of loop.
    8087  lastOffset = freeSpace2ndTo1stEnd;
    8088  }
    8089  }
    8090  }
    8091 
    8092  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8093  const VkDeviceSize freeSpace1stTo2ndEnd =
    8094  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8095  while(lastOffset < freeSpace1stTo2ndEnd)
    8096  {
    8097  // Find next non-null allocation or move nextAllocIndex to the end.
    8098  while(nextAlloc1stIndex < suballoc1stCount &&
    8099  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8100  {
    8101  ++nextAlloc1stIndex;
    8102  }
    8103 
    8104  // Found non-null allocation.
    8105  if(nextAlloc1stIndex < suballoc1stCount)
    8106  {
    8107  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8108 
    8109  // 1. Process free space before this allocation.
    8110  if(lastOffset < suballoc.offset)
    8111  {
    8112  // There is free space from lastOffset to suballoc.offset.
    8113  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8114  inoutStats.unusedSize += unusedRangeSize;
    8115  ++inoutStats.unusedRangeCount;
    8116  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8117  }
    8118 
    8119  // 2. Process this allocation.
    8120  // There is allocation with suballoc.offset, suballoc.size.
    8121  ++inoutStats.allocationCount;
    8122 
    8123  // 3. Prepare for next iteration.
    8124  lastOffset = suballoc.offset + suballoc.size;
    8125  ++nextAlloc1stIndex;
    8126  }
    8127  // We are at the end.
    8128  else
    8129  {
    8130  if(lastOffset < freeSpace1stTo2ndEnd)
    8131  {
    8132  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8133  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8134  inoutStats.unusedSize += unusedRangeSize;
    8135  ++inoutStats.unusedRangeCount;
    8136  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8137  }
    8138 
    8139  // End of loop.
    8140  lastOffset = freeSpace1stTo2ndEnd;
    8141  }
    8142  }
    8143 
    8144  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8145  {
    8146  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8147  while(lastOffset < size)
    8148  {
    8149  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8150  while(nextAlloc2ndIndex != SIZE_MAX &&
    8151  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8152  {
    8153  --nextAlloc2ndIndex;
    8154  }
    8155 
    8156  // Found non-null allocation.
    8157  if(nextAlloc2ndIndex != SIZE_MAX)
    8158  {
    8159  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8160 
    8161  // 1. Process free space before this allocation.
    8162  if(lastOffset < suballoc.offset)
    8163  {
    8164  // There is free space from lastOffset to suballoc.offset.
    8165  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8166  inoutStats.unusedSize += unusedRangeSize;
    8167  ++inoutStats.unusedRangeCount;
    8168  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8169  }
    8170 
    8171  // 2. Process this allocation.
    8172  // There is allocation with suballoc.offset, suballoc.size.
    8173  ++inoutStats.allocationCount;
    8174 
    8175  // 3. Prepare for next iteration.
    8176  lastOffset = suballoc.offset + suballoc.size;
    8177  --nextAlloc2ndIndex;
    8178  }
    8179  // We are at the end.
    8180  else
    8181  {
    8182  if(lastOffset < size)
    8183  {
    8184  // There is free space from lastOffset to size.
    8185  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8186  inoutStats.unusedSize += unusedRangeSize;
    8187  ++inoutStats.unusedRangeCount;
    8188  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8189  }
    8190 
    8191  // End of loop.
    8192  lastOffset = size;
    8193  }
    8194  }
    8195  }
    8196 }
    8197 
    8198 #if VMA_STATS_STRING_ENABLED
    8199 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8200 {
    8201  const VkDeviceSize size = GetSize();
    8202  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8203  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8204  const size_t suballoc1stCount = suballocations1st.size();
    8205  const size_t suballoc2ndCount = suballocations2nd.size();
    8206 
    8207  // FIRST PASS
    8208 
    8209  size_t unusedRangeCount = 0;
    8210  VkDeviceSize usedBytes = 0;
    8211 
    8212  VkDeviceSize lastOffset = 0;
    8213 
    8214  size_t alloc2ndCount = 0;
    8215  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8216  {
    8217  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8218  size_t nextAlloc2ndIndex = 0;
    8219  while(lastOffset < freeSpace2ndTo1stEnd)
    8220  {
    8221  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8222  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8223  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8224  {
    8225  ++nextAlloc2ndIndex;
    8226  }
    8227 
    8228  // Found non-null allocation.
    8229  if(nextAlloc2ndIndex < suballoc2ndCount)
    8230  {
    8231  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8232 
    8233  // 1. Process free space before this allocation.
    8234  if(lastOffset < suballoc.offset)
    8235  {
    8236  // There is free space from lastOffset to suballoc.offset.
    8237  ++unusedRangeCount;
    8238  }
    8239 
    8240  // 2. Process this allocation.
    8241  // There is allocation with suballoc.offset, suballoc.size.
    8242  ++alloc2ndCount;
    8243  usedBytes += suballoc.size;
    8244 
    8245  // 3. Prepare for next iteration.
    8246  lastOffset = suballoc.offset + suballoc.size;
    8247  ++nextAlloc2ndIndex;
    8248  }
    8249  // We are at the end.
    8250  else
    8251  {
    8252  if(lastOffset < freeSpace2ndTo1stEnd)
    8253  {
    8254  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8255  ++unusedRangeCount;
    8256  }
    8257 
    8258  // End of loop.
    8259  lastOffset = freeSpace2ndTo1stEnd;
    8260  }
    8261  }
    8262  }
    8263 
    8264  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8265  size_t alloc1stCount = 0;
    8266  const VkDeviceSize freeSpace1stTo2ndEnd =
    8267  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8268  while(lastOffset < freeSpace1stTo2ndEnd)
    8269  {
    8270  // Find next non-null allocation or move nextAllocIndex to the end.
    8271  while(nextAlloc1stIndex < suballoc1stCount &&
    8272  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8273  {
    8274  ++nextAlloc1stIndex;
    8275  }
    8276 
    8277  // Found non-null allocation.
    8278  if(nextAlloc1stIndex < suballoc1stCount)
    8279  {
    8280  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8281 
    8282  // 1. Process free space before this allocation.
    8283  if(lastOffset < suballoc.offset)
    8284  {
    8285  // There is free space from lastOffset to suballoc.offset.
    8286  ++unusedRangeCount;
    8287  }
    8288 
    8289  // 2. Process this allocation.
    8290  // There is allocation with suballoc.offset, suballoc.size.
    8291  ++alloc1stCount;
    8292  usedBytes += suballoc.size;
    8293 
    8294  // 3. Prepare for next iteration.
    8295  lastOffset = suballoc.offset + suballoc.size;
    8296  ++nextAlloc1stIndex;
    8297  }
    8298  // We are at the end.
    8299  else
    8300  {
    8301  if(lastOffset < size)
    8302  {
    8303  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8304  ++unusedRangeCount;
    8305  }
    8306 
    8307  // End of loop.
    8308  lastOffset = freeSpace1stTo2ndEnd;
    8309  }
    8310  }
    8311 
    8312  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8313  {
    8314  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8315  while(lastOffset < size)
    8316  {
    8317  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8318  while(nextAlloc2ndIndex != SIZE_MAX &&
    8319  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8320  {
    8321  --nextAlloc2ndIndex;
    8322  }
    8323 
    8324  // Found non-null allocation.
    8325  if(nextAlloc2ndIndex != SIZE_MAX)
    8326  {
    8327  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8328 
    8329  // 1. Process free space before this allocation.
    8330  if(lastOffset < suballoc.offset)
    8331  {
    8332  // There is free space from lastOffset to suballoc.offset.
    8333  ++unusedRangeCount;
    8334  }
    8335 
    8336  // 2. Process this allocation.
    8337  // There is allocation with suballoc.offset, suballoc.size.
    8338  ++alloc2ndCount;
    8339  usedBytes += suballoc.size;
    8340 
    8341  // 3. Prepare for next iteration.
    8342  lastOffset = suballoc.offset + suballoc.size;
    8343  --nextAlloc2ndIndex;
    8344  }
    8345  // We are at the end.
    8346  else
    8347  {
    8348  if(lastOffset < size)
    8349  {
    8350  // There is free space from lastOffset to size.
    8351  ++unusedRangeCount;
    8352  }
    8353 
    8354  // End of loop.
    8355  lastOffset = size;
    8356  }
    8357  }
    8358  }
    8359 
    8360  const VkDeviceSize unusedBytes = size - usedBytes;
    8361  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8362 
    8363  // SECOND PASS
    8364  lastOffset = 0;
    8365 
    8366  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8367  {
    8368  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8369  size_t nextAlloc2ndIndex = 0;
    8370  while(lastOffset < freeSpace2ndTo1stEnd)
    8371  {
    8372  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8373  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8374  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8375  {
    8376  ++nextAlloc2ndIndex;
    8377  }
    8378 
    8379  // Found non-null allocation.
    8380  if(nextAlloc2ndIndex < suballoc2ndCount)
    8381  {
    8382  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8383 
    8384  // 1. Process free space before this allocation.
    8385  if(lastOffset < suballoc.offset)
    8386  {
    8387  // There is free space from lastOffset to suballoc.offset.
    8388  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8389  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8390  }
    8391 
    8392  // 2. Process this allocation.
    8393  // There is allocation with suballoc.offset, suballoc.size.
    8394  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8395 
    8396  // 3. Prepare for next iteration.
    8397  lastOffset = suballoc.offset + suballoc.size;
    8398  ++nextAlloc2ndIndex;
    8399  }
    8400  // We are at the end.
    8401  else
    8402  {
    8403  if(lastOffset < freeSpace2ndTo1stEnd)
    8404  {
    8405  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8406  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8407  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8408  }
    8409 
    8410  // End of loop.
    8411  lastOffset = freeSpace2ndTo1stEnd;
    8412  }
    8413  }
    8414  }
    8415 
    8416  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8417  while(lastOffset < freeSpace1stTo2ndEnd)
    8418  {
    8419  // Find next non-null allocation or move nextAllocIndex to the end.
    8420  while(nextAlloc1stIndex < suballoc1stCount &&
    8421  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8422  {
    8423  ++nextAlloc1stIndex;
    8424  }
    8425 
    8426  // Found non-null allocation.
    8427  if(nextAlloc1stIndex < suballoc1stCount)
    8428  {
    8429  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8430 
    8431  // 1. Process free space before this allocation.
    8432  if(lastOffset < suballoc.offset)
    8433  {
    8434  // There is free space from lastOffset to suballoc.offset.
    8435  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8436  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8437  }
    8438 
    8439  // 2. Process this allocation.
    8440  // There is allocation with suballoc.offset, suballoc.size.
    8441  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8442 
    8443  // 3. Prepare for next iteration.
    8444  lastOffset = suballoc.offset + suballoc.size;
    8445  ++nextAlloc1stIndex;
    8446  }
    8447  // We are at the end.
    8448  else
    8449  {
    8450  if(lastOffset < freeSpace1stTo2ndEnd)
    8451  {
    8452  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8453  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8454  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8455  }
    8456 
    8457  // End of loop.
    8458  lastOffset = freeSpace1stTo2ndEnd;
    8459  }
    8460  }
    8461 
    8462  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8463  {
    8464  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8465  while(lastOffset < size)
    8466  {
    8467  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8468  while(nextAlloc2ndIndex != SIZE_MAX &&
    8469  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8470  {
    8471  --nextAlloc2ndIndex;
    8472  }
    8473 
    8474  // Found non-null allocation.
    8475  if(nextAlloc2ndIndex != SIZE_MAX)
    8476  {
    8477  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8478 
    8479  // 1. Process free space before this allocation.
    8480  if(lastOffset < suballoc.offset)
    8481  {
    8482  // There is free space from lastOffset to suballoc.offset.
    8483  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8484  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8485  }
    8486 
    8487  // 2. Process this allocation.
    8488  // There is allocation with suballoc.offset, suballoc.size.
    8489  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8490 
    8491  // 3. Prepare for next iteration.
    8492  lastOffset = suballoc.offset + suballoc.size;
    8493  --nextAlloc2ndIndex;
    8494  }
    8495  // We are at the end.
    8496  else
    8497  {
    8498  if(lastOffset < size)
    8499  {
    8500  // There is free space from lastOffset to size.
    8501  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8502  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8503  }
    8504 
    8505  // End of loop.
    8506  lastOffset = size;
    8507  }
    8508  }
    8509  }
    8510 
    8511  PrintDetailedMap_End(json);
    8512 }
    8513 #endif // #if VMA_STATS_STRING_ENABLED
    8514 
/*
Computes a placement for a new allocation of allocSize/allocAlignment inside
this linear block WITHOUT modifying any state. On success fills
*pAllocationRequest and returns true; returns false when the allocation cannot
be placed.

Two regimes, selected by upperAddress:
- upperAddress == true: allocate from the top of the block downwards - the 2nd
  suballocation vector is used as an "upper stack" (double-stack mode). This is
  rejected if the block is already used as a ring buffer.
- upperAddress == false: first try to append at the end of the 1st vector; if
  that fails, try to wrap around to the low end of the block (2nd vector used
  as a ring buffer), optionally counting existing allocations that could be
  made lost (canMakeOtherLost) into itemsToMakeLostCount/sumItemSize.

bufferImageGranularity > 1 triggers the Vulkan linear/optimal-resource
page-conflict checks (VmaBlocksOnSamePage / VmaIsBufferImageGranularityConflict).
`strategy` is not used by the linear algorithm.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Double-stack mode is mutually exclusive with ring-buffer mode.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            // Would underflow below offset 0 of the block.
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment. Aligning DOWN, since this stack grows from the top.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Iterate 2nd vector backwards = from the lowest-offset (most
            // recently pushed) item upwards, i.e. items adjacent to resultOffset.
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // In double-stack mode the upper stack's lowest item caps the usable space.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Count allocations at the start of 1st vector that collide with
                // the proposed range and could be made lost; fail if any cannot.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            // NOTE(review): strict '<' against `size` here but '<=' against the next
            // suballocation's offset below - confirm the asymmetry is intentional.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                // Free size spans up to the first surviving 1st-vector item (or block
                // end), minus the sizes of items that will be made lost.
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    8887 
    8888 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    8889  uint32_t currentFrameIndex,
    8890  uint32_t frameInUseCount,
    8891  VmaAllocationRequest* pAllocationRequest)
    8892 {
    8893  if(pAllocationRequest->itemsToMakeLostCount == 0)
    8894  {
    8895  return true;
    8896  }
    8897 
    8898  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    8899 
    8900  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8901  size_t index1st = m_1stNullItemsBeginCount;
    8902  size_t madeLostCount = 0;
    8903  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    8904  {
    8905  VMA_ASSERT(index1st < suballocations1st.size());
    8906  VmaSuballocation& suballoc = suballocations1st[index1st];
    8907  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8908  {
    8909  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8910  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    8911  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8912  {
    8913  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8914  suballoc.hAllocation = VK_NULL_HANDLE;
    8915  m_SumFreeSize += suballoc.size;
    8916  ++m_1stNullItemsMiddleCount;
    8917  ++madeLostCount;
    8918  }
    8919  else
    8920  {
    8921  return false;
    8922  }
    8923  }
    8924  ++index1st;
    8925  }
    8926 
    8927  CleanupAfterFree();
    8928  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    8929 
    8930  return true;
    8931 }
    8932 
    8933 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8934 {
    8935  uint32_t lostAllocationCount = 0;
    8936 
    8937  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8938  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8939  {
    8940  VmaSuballocation& suballoc = suballocations1st[i];
    8941  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8942  suballoc.hAllocation->CanBecomeLost() &&
    8943  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8944  {
    8945  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8946  suballoc.hAllocation = VK_NULL_HANDLE;
    8947  ++m_1stNullItemsMiddleCount;
    8948  m_SumFreeSize += suballoc.size;
    8949  ++lostAllocationCount;
    8950  }
    8951  }
    8952 
    8953  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8954  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8955  {
    8956  VmaSuballocation& suballoc = suballocations2nd[i];
    8957  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8958  suballoc.hAllocation->CanBecomeLost() &&
    8959  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8960  {
    8961  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8962  suballoc.hAllocation = VK_NULL_HANDLE;
    8963  ++m_2ndNullItemsCount;
    8964  ++lostAllocationCount;
    8965  }
    8966  }
    8967 
    8968  if(lostAllocationCount)
    8969  {
    8970  CleanupAfterFree();
    8971  }
    8972 
    8973  return lostAllocationCount;
    8974 }
    8975 
    8976 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8977 {
    8978  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8979  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8980  {
    8981  const VmaSuballocation& suballoc = suballocations1st[i];
    8982  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8983  {
    8984  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    8985  {
    8986  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8987  return VK_ERROR_VALIDATION_FAILED_EXT;
    8988  }
    8989  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    8990  {
    8991  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8992  return VK_ERROR_VALIDATION_FAILED_EXT;
    8993  }
    8994  }
    8995  }
    8996 
    8997  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8998  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8999  {
    9000  const VmaSuballocation& suballoc = suballocations2nd[i];
    9001  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9002  {
    9003  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9004  {
    9005  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9006  return VK_ERROR_VALIDATION_FAILED_EXT;
    9007  }
    9008  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9009  {
    9010  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9011  return VK_ERROR_VALIDATION_FAILED_EXT;
    9012  }
    9013  }
    9014  }
    9015 
    9016  return VK_SUCCESS;
    9017 }
    9018 
/*
Commits an allocation previously computed by CreateAllocationRequest():
inserts the new suballocation into the appropriate vector and updates
m_SumFreeSize. `upperAddress` selects double-stack placement (2nd vector,
growing down from the top); otherwise the item is appended to the 1st vector
or, in ring-buffer usage, to the 2nd vector before the 1st vector's first item.
*/
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Switching to (or continuing) double-stack mode; incompatible with ring buffer.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // Transition/validate the 2nd-vector mode for ring-buffer use.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset matches neither valid placement - the request is
                // inconsistent with the current block state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    // Keep the cached free-size total in sync.
    m_SumFreeSize -= newSuballoc.size;
}
    9088 
// Frees the given allocation from this block by delegating to FreeAtOffset()
// with the allocation's starting offset.
void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    FreeAtOffset(allocation->GetOffset());
}
    9093 
// Frees the suballocation that starts at the given offset.
// Checks the cheap cases first (very first allocation, last allocation in
// either vector), then falls back to binary search in the middle of the
// 1st and 2nd vectors. Asserts if the offset is not found.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as a null item in place; actual removal/compaction is
            // deferred to CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Comparator depends on mode: the 2nd vector is searched with ascending
        // offsets in ring-buffer mode, descending in double-stack mode.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9182 
    9183 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9184 {
    9185  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9186  const size_t suballocCount = AccessSuballocations1st().size();
    9187  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9188 }
    9189 
// Restores the linear metadata invariants after a free:
// trims null items from the edges of both vectors, optionally compacts the
// 1st vector, and handles the transitions "2nd became empty" and
// "1st became empty" (the latter may swap the roles of the two vectors).
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations at all - reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            // Reclassify a middle null item as a beginning null item.
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all non-null items of the 1st vector to the front,
            // dropping every null item.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Reclassify leading null items of the (new) 1st vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which internal vector plays the role of "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9286 
    9287 
    9289 // class VmaBlockMetadata_Buddy
    9290 
// Constructs empty buddy metadata. m_FreeCount starts at 1 because the whole
// block is considered one free node until Init() builds the root.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero all per-level free-list heads/tails.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9300 
// Recursively deletes the whole node tree starting at the root.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9305 
// Initializes metadata for a block of the given size.
// Only the largest power-of-2 prefix of the block is usable by the buddy
// algorithm; the remainder is reported as unusable space.
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // Create the root node covering the entire usable size and register it
    // as the single free node at level 0.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    9330 
// Validates internal consistency: the node tree, the aggregated counters,
// and the doubly-linked free lists of every level. Returns false (via
// VMA_VALIDATE) on the first inconsistency found.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Last node in the list must be the registered back.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward and backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9373 
    9374 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9375 {
    9376  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9377  {
    9378  if(m_FreeList[level].front != VMA_NULL)
    9379  {
    9380  return LevelToNodeSize(level);
    9381  }
    9382  }
    9383  return 0;
    9384 }
    9385 
// Fills outInfo with statistics for this single block by walking the node
// tree, then accounts for the unusable tail (size not covered by the
// power-of-2 usable region) as one extra unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    // Recursive accumulation over the whole tree.
    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9409 
// Accumulates this block's statistics into pool-wide stats using the cached
// counters (no tree traversal).
void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    inoutStats.size += GetSize();
    // Unusable tail counts as unused space from the pool's point of view.
    inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    inoutStats.allocationCount += m_AllocationCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());

    if(unusableSize > 0)
    {
        ++inoutStats.unusedRangeCount;
        // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    }
}
    9426 
    9427 #if VMA_STATS_STRING_ENABLED
    9428 
// Writes a JSON description of this block: summary stats, every node of the
// tree, and the unusable tail (if any) as a trailing unused range.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // Currently does a full tree walk just to gather the summary numbers.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9453 
    9454 #endif // #if VMA_STATS_STRING_ENABLED
    9455 
// Searches the per-level free lists for a node that can hold the requested
// allocation. Returns true and fills *pAllocationRequest on success.
// The chosen level is stashed in customData for Alloc() to use.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Try the best-fit level first, then progressively larger nodes
    // (level decreases => node size increases).
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Remember which level the free node was found at.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9506 
    9507 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9508  uint32_t currentFrameIndex,
    9509  uint32_t frameInUseCount,
    9510  VmaAllocationRequest* pAllocationRequest)
    9511 {
    9512  /*
    9513  Lost allocations are not supported in buddy allocator at the moment.
    9514  Support might be added in the future.
    9515  */
    9516  return pAllocationRequest->itemsToMakeLostCount == 0;
    9517 }
    9518 
    9519 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9520 {
    9521  /*
    9522  Lost allocations are not supported in buddy allocator at the moment.
    9523  Support might be added in the future.
    9524  */
    9525  return 0;
    9526 }
    9527 
// Commits an allocation previously prepared by CreateAllocationRequest().
// Finds the free node at the level stored in request.customData, then splits
// it repeatedly until a node of the target level is produced, and converts
// that node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level at which CreateAllocationRequest() found the free node.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node with the requested offset in that level's list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // leftChild is pushed last so it ends up at the front of the list.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9602 
// Recursively deletes a node and, for split nodes, its entire subtree
// (right child first, then left child, then the node itself).
void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
{
    if(node->type == Node::TYPE_SPLIT)
    {
        DeleteNode(node->split.leftChild->buddy);
        DeleteNode(node->split.leftChild);
    }

    vma_delete(GetAllocationCallbacks(), node);
}
    9613 
    9614 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9615 {
    9616  VMA_VALIDATE(level < m_LevelCount);
    9617  VMA_VALIDATE(curr->parent == parent);
    9618  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9619  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9620  switch(curr->type)
    9621  {
    9622  case Node::TYPE_FREE:
    9623  // curr->free.prev, next are validated separately.
    9624  ctx.calculatedSumFreeSize += levelNodeSize;
    9625  ++ctx.calculatedFreeCount;
    9626  break;
    9627  case Node::TYPE_ALLOCATION:
    9628  ++ctx.calculatedAllocationCount;
    9629  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9630  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9631  break;
    9632  case Node::TYPE_SPLIT:
    9633  {
    9634  const uint32_t childrenLevel = level + 1;
    9635  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9636  const Node* const leftChild = curr->split.leftChild;
    9637  VMA_VALIDATE(leftChild != VMA_NULL);
    9638  VMA_VALIDATE(leftChild->offset == curr->offset);
    9639  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9640  {
    9641  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9642  }
    9643  const Node* const rightChild = leftChild->buddy;
    9644  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9645  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9646  {
    9647  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9648  }
    9649  }
    9650  break;
    9651  default:
    9652  return false;
    9653  }
    9654 
    9655  return true;
    9656 }
    9657 
    9658 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9659 {
    9660  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9661  uint32_t level = 0;
    9662  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9663  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9664  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9665  {
    9666  ++level;
    9667  currLevelNodeSize = nextLevelNodeSize;
    9668  nextLevelNodeSize = currLevelNodeSize >> 1;
    9669  }
    9670  return level;
    9671 }
    9672 
    9673 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    9674 {
    9675  // Find node and level.
    9676  Node* node = m_Root;
    9677  VkDeviceSize nodeOffset = 0;
    9678  uint32_t level = 0;
    9679  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    9680  while(node->type == Node::TYPE_SPLIT)
    9681  {
    9682  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    9683  if(offset < nodeOffset + nextLevelSize)
    9684  {
    9685  node = node->split.leftChild;
    9686  }
    9687  else
    9688  {
    9689  node = node->split.leftChild->buddy;
    9690  nodeOffset += nextLevelSize;
    9691  }
    9692  ++level;
    9693  levelNodeSize = nextLevelSize;
    9694  }
    9695 
    9696  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    9697  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    9698 
    9699  ++m_FreeCount;
    9700  --m_AllocationCount;
    9701  m_SumFreeSize += alloc->GetSize();
    9702 
    9703  node->type = Node::TYPE_FREE;
    9704 
    9705  // Join free nodes if possible.
    9706  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    9707  {
    9708  RemoveFromFreeList(level, node->buddy);
    9709  Node* const parent = node->parent;
    9710 
    9711  vma_delete(GetAllocationCallbacks(), node->buddy);
    9712  vma_delete(GetAllocationCallbacks(), node);
    9713  parent->type = Node::TYPE_FREE;
    9714 
    9715  node = parent;
    9716  --level;
    9717  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    9718  --m_FreeCount;
    9719  }
    9720 
    9721  AddToFreeListFront(level, node);
    9722 }
    9723 
    9724 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9725 {
    9726  switch(node->type)
    9727  {
    9728  case Node::TYPE_FREE:
    9729  ++outInfo.unusedRangeCount;
    9730  outInfo.unusedBytes += levelNodeSize;
    9731  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9732  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9733  break;
    9734  case Node::TYPE_ALLOCATION:
    9735  {
    9736  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9737  ++outInfo.allocationCount;
    9738  outInfo.usedBytes += allocSize;
    9739  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9740  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9741 
    9742  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9743  if(unusedRangeSize > 0)
    9744  {
    9745  ++outInfo.unusedRangeCount;
    9746  outInfo.unusedBytes += unusedRangeSize;
    9747  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9748  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9749  }
    9750  }
    9751  break;
    9752  case Node::TYPE_SPLIT:
    9753  {
    9754  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9755  const Node* const leftChild = node->split.leftChild;
    9756  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9757  const Node* const rightChild = leftChild->buddy;
    9758  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9759  }
    9760  break;
    9761  default:
    9762  VMA_ASSERT(0);
    9763  }
    9764 }
    9765 
    9766 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9767 {
    9768  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9769 
    9770  // List is empty.
    9771  Node* const frontNode = m_FreeList[level].front;
    9772  if(frontNode == VMA_NULL)
    9773  {
    9774  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9775  node->free.prev = node->free.next = VMA_NULL;
    9776  m_FreeList[level].front = m_FreeList[level].back = node;
    9777  }
    9778  else
    9779  {
    9780  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9781  node->free.prev = VMA_NULL;
    9782  node->free.next = frontNode;
    9783  frontNode->free.prev = node;
    9784  m_FreeList[level].front = node;
    9785  }
    9786 }
    9787 
    9788 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9789 {
    9790  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9791 
    9792  // It is at the front.
    9793  if(node->free.prev == VMA_NULL)
    9794  {
    9795  VMA_ASSERT(m_FreeList[level].front == node);
    9796  m_FreeList[level].front = node->free.next;
    9797  }
    9798  else
    9799  {
    9800  Node* const prevFreeNode = node->free.prev;
    9801  VMA_ASSERT(prevFreeNode->free.next == node);
    9802  prevFreeNode->free.next = node->free.next;
    9803  }
    9804 
    9805  // It is at the back.
    9806  if(node->free.next == VMA_NULL)
    9807  {
    9808  VMA_ASSERT(m_FreeList[level].back == node);
    9809  m_FreeList[level].back = node->free.prev;
    9810  }
    9811  else
    9812  {
    9813  Node* const nextFreeNode = node->free.next;
    9814  VMA_ASSERT(nextFreeNode->free.prev == node);
    9815  nextFreeNode->free.prev = node->free.prev;
    9816  }
    9817 }
    9818 
    9819 #if VMA_STATS_STRING_ENABLED
// Recursively writes one node of the buddy tree to the JSON writer:
// free nodes as unused ranges, allocation nodes as allocations (plus any
// slack as an unused range), split nodes by recursing into both children.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                // Report the slack after the allocation inside this node.
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    9850 #endif // #if VMA_STATS_STRING_ENABLED
    9851 
    9852 
    9854 // class VmaDeviceMemoryBlock
    9855 
// Constructs an uninitialized block; real setup happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    9865 
    9866 void VmaDeviceMemoryBlock::Init(
    9867  VmaAllocator hAllocator,
    9868  uint32_t newMemoryTypeIndex,
    9869  VkDeviceMemory newMemory,
    9870  VkDeviceSize newSize,
    9871  uint32_t id,
    9872  uint32_t algorithm)
    9873 {
    9874  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9875 
    9876  m_MemoryTypeIndex = newMemoryTypeIndex;
    9877  m_Id = id;
    9878  m_hMemory = newMemory;
    9879 
    9880  switch(algorithm)
    9881  {
    9883  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9884  break;
    9886  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9887  break;
    9888  default:
    9889  VMA_ASSERT(0);
    9890  // Fall-through.
    9891  case 0:
    9892  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9893  }
    9894  m_pMetadata->Init(newSize);
    9895 }
    9896 
// Releases the VkDeviceMemory and the metadata object.
// Requires that all suballocations were freed first.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    9910 
// Validates the block's own handles, then delegates to the metadata's
// consistency check.
bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}
    9918 
// Temporarily maps the whole block and asks the metadata to verify the
// magic-value margins of all allocations. Returns the mapping error or the
// metadata's verdict.
VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
{
    void* pData = nullptr;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    res = m_pMetadata->CheckCorruption(pData);

    // Balance the Map() above regardless of the check's outcome.
    Unmap(hAllocator, 1);

    return res;
}
    9934 
// Reference-counted map: the first caller performs the actual vkMapMemory
// of the whole block, subsequent callers just bump the counter and receive
// the cached pointer. `count` lets a caller take several references at once.
// ppData may be null when the caller only needs the reference.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Serialize against concurrent Map/Unmap/Bind on the same VkDeviceMemory.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
    9973 
// Releases `count` map references taken via Map(); calls vkUnmapMemory only
// when the counter drops to zero. Asserts on unbalanced unmap.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    // Serialize against concurrent Map/Unmap/Bind on the same VkDeviceMemory.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            // Last reference gone - actually unmap.
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
    9996 
// Writes the corruption-detection magic values into the debug margins
// directly before and after the given allocation range. Only meaningful
// when VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION are enabled.
VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Magic value in the margin before and after the allocation.
    VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    VmaWriteMagicValue(pData, allocOffset + allocSize);

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    10016 
// Verifies the magic values in the debug margins around an allocation that
// is being freed; asserts loudly if either margin was overwritten
// (i.e. an out-of-bounds write corrupted neighboring memory).
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    10042 
    10043 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10044  const VmaAllocator hAllocator,
    10045  const VmaAllocation hAllocation,
    10046  VkBuffer hBuffer)
    10047 {
    10048  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10049  hAllocation->GetBlock() == this);
    10050  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10051  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10052  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10053  hAllocator->m_hDevice,
    10054  hBuffer,
    10055  m_hMemory,
    10056  hAllocation->GetOffset());
    10057 }
    10058 
    10059 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10060  const VmaAllocator hAllocator,
    10061  const VmaAllocation hAllocation,
    10062  VkImage hImage)
    10063 {
    10064  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10065  hAllocation->GetBlock() == this);
    10066  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10067  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10068  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10069  hAllocator->m_hDevice,
    10070  hImage,
    10071  m_hMemory,
    10072  hAllocation->GetOffset());
    10073 }
    10074 
    10075 static void InitStatInfo(VmaStatInfo& outInfo)
    10076 {
    10077  memset(&outInfo, 0, sizeof(outInfo));
    10078  outInfo.allocationSizeMin = UINT64_MAX;
    10079  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10080 }
    10081 
    10082 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10083 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10084 {
    10085  inoutInfo.blockCount += srcInfo.blockCount;
    10086  inoutInfo.allocationCount += srcInfo.allocationCount;
    10087  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10088  inoutInfo.usedBytes += srcInfo.usedBytes;
    10089  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10090  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10091  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10092  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10093  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10094 }
    10095 
    10096 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10097 {
    10098  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10099  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10100  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10101  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10102 }
    10103 
// Constructs a custom pool by forwarding its parameters into the internal
// VmaBlockVector that owns the pool's VkDeviceMemory blocks.
// When createInfo.blockSize == 0, the allocator-computed preferredBlockSize is
// used instead (and the block vector may shrink new blocks adaptively, since
// explicitBlockSize is then false).
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 disables buffer/image granularity conflict handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10122 
// Intentionally empty: m_BlockVector's destructor destroys all remaining blocks.
VmaPool_T::~VmaPool_T()
{
}
    10126 
    10127 #if VMA_STATS_STRING_ENABLED
    10128 
    10129 #endif // #if VMA_STATS_STRING_ENABLED
    10130 
// Constructs an (initially empty) vector of VkDeviceMemory blocks for one
// Vulkan memory type. Used both for the allocator's default per-type pools
// (isCustomPool == false) and for user-created custom pools.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize, // true = user fixed the block size; no adaptive shrinking.
    uint32_t algorithm) :   // VMA_POOL_CREATE_*_ALGORITHM_BIT or 0 for default.
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL), // Created lazily by EnsureDefragmentator().
    m_NextBlockId(0)
{
}
    10158 
    10159 VmaBlockVector::~VmaBlockVector()
    10160 {
    10161  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10162 
    10163  for(size_t i = m_Blocks.size(); i--; )
    10164  {
    10165  m_Blocks[i]->Destroy(m_hAllocator);
    10166  vma_delete(m_hAllocator, m_Blocks[i]);
    10167  }
    10168 }
    10169 
    10170 VkResult VmaBlockVector::CreateMinBlocks()
    10171 {
    10172  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10173  {
    10174  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10175  if(res != VK_SUCCESS)
    10176  {
    10177  return res;
    10178  }
    10179  }
    10180  return VK_SUCCESS;
    10181 }
    10182 
    10183 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10184 {
    10185  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10186 
    10187  const size_t blockCount = m_Blocks.size();
    10188 
    10189  pStats->size = 0;
    10190  pStats->unusedSize = 0;
    10191  pStats->allocationCount = 0;
    10192  pStats->unusedRangeCount = 0;
    10193  pStats->unusedRangeSizeMax = 0;
    10194  pStats->blockCount = blockCount;
    10195 
    10196  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10197  {
    10198  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10199  VMA_ASSERT(pBlock);
    10200  VMA_HEAVY_ASSERT(pBlock->Validate());
    10201  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10202  }
    10203 }
    10204 
    10205 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10206 {
    10207  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10208  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10209  (VMA_DEBUG_MARGIN > 0) &&
    10210  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10211 }
    10212 
    10213 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10214 
    10215 VkResult VmaBlockVector::Allocate(
    10216  VmaPool hCurrentPool,
    10217  uint32_t currentFrameIndex,
    10218  VkDeviceSize size,
    10219  VkDeviceSize alignment,
    10220  const VmaAllocationCreateInfo& createInfo,
    10221  VmaSuballocationType suballocType,
    10222  VmaAllocation* pAllocation)
    10223 {
    10224  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10225  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10226  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10227  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10228  const bool canCreateNewBlock =
    10229  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10230  (m_Blocks.size() < m_MaxBlockCount);
    10231  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10232 
    10233  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10234  // Which in turn is available only when maxBlockCount = 1.
    10235  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10236  {
    10237  canMakeOtherLost = false;
    10238  }
    10239 
    10240  // Upper address can only be used with linear allocator and within single memory block.
    10241  if(isUpperAddress &&
    10242  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10243  {
    10244  return VK_ERROR_FEATURE_NOT_PRESENT;
    10245  }
    10246 
    10247  // Validate strategy.
    10248  switch(strategy)
    10249  {
    10250  case 0:
    10252  break;
    10256  break;
    10257  default:
    10258  return VK_ERROR_FEATURE_NOT_PRESENT;
    10259  }
    10260 
    10261  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10262  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10263  {
    10264  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10265  }
    10266 
    10267  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10268 
    10269  /*
    10270  Under certain condition, this whole section can be skipped for optimization, so
    10271  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10272  e.g. for custom pools with linear algorithm.
    10273  */
    10274  if(!canMakeOtherLost || canCreateNewBlock)
    10275  {
    10276  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10277  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10279 
    10280  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10281  {
    10282  // Use only last block.
    10283  if(!m_Blocks.empty())
    10284  {
    10285  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10286  VMA_ASSERT(pCurrBlock);
    10287  VkResult res = AllocateFromBlock(
    10288  pCurrBlock,
    10289  hCurrentPool,
    10290  currentFrameIndex,
    10291  size,
    10292  alignment,
    10293  allocFlagsCopy,
    10294  createInfo.pUserData,
    10295  suballocType,
    10296  strategy,
    10297  pAllocation);
    10298  if(res == VK_SUCCESS)
    10299  {
    10300  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10301  return VK_SUCCESS;
    10302  }
    10303  }
    10304  }
    10305  else
    10306  {
    10308  {
    10309  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10310  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10311  {
    10312  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10313  VMA_ASSERT(pCurrBlock);
    10314  VkResult res = AllocateFromBlock(
    10315  pCurrBlock,
    10316  hCurrentPool,
    10317  currentFrameIndex,
    10318  size,
    10319  alignment,
    10320  allocFlagsCopy,
    10321  createInfo.pUserData,
    10322  suballocType,
    10323  strategy,
    10324  pAllocation);
    10325  if(res == VK_SUCCESS)
    10326  {
    10327  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10328  return VK_SUCCESS;
    10329  }
    10330  }
    10331  }
    10332  else // WORST_FIT, FIRST_FIT
    10333  {
    10334  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10335  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10336  {
    10337  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10338  VMA_ASSERT(pCurrBlock);
    10339  VkResult res = AllocateFromBlock(
    10340  pCurrBlock,
    10341  hCurrentPool,
    10342  currentFrameIndex,
    10343  size,
    10344  alignment,
    10345  allocFlagsCopy,
    10346  createInfo.pUserData,
    10347  suballocType,
    10348  strategy,
    10349  pAllocation);
    10350  if(res == VK_SUCCESS)
    10351  {
    10352  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10353  return VK_SUCCESS;
    10354  }
    10355  }
    10356  }
    10357  }
    10358 
    10359  // 2. Try to create new block.
    10360  if(canCreateNewBlock)
    10361  {
    10362  // Calculate optimal size for new block.
    10363  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10364  uint32_t newBlockSizeShift = 0;
    10365  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10366 
    10367  if(!m_ExplicitBlockSize)
    10368  {
    10369  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10370  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10371  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10372  {
    10373  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10374  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10375  {
    10376  newBlockSize = smallerNewBlockSize;
    10377  ++newBlockSizeShift;
    10378  }
    10379  else
    10380  {
    10381  break;
    10382  }
    10383  }
    10384  }
    10385 
    10386  size_t newBlockIndex = 0;
    10387  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10388  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10389  if(!m_ExplicitBlockSize)
    10390  {
    10391  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10392  {
    10393  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10394  if(smallerNewBlockSize >= size)
    10395  {
    10396  newBlockSize = smallerNewBlockSize;
    10397  ++newBlockSizeShift;
    10398  res = CreateBlock(newBlockSize, &newBlockIndex);
    10399  }
    10400  else
    10401  {
    10402  break;
    10403  }
    10404  }
    10405  }
    10406 
    10407  if(res == VK_SUCCESS)
    10408  {
    10409  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10410  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10411 
    10412  res = AllocateFromBlock(
    10413  pBlock,
    10414  hCurrentPool,
    10415  currentFrameIndex,
    10416  size,
    10417  alignment,
    10418  allocFlagsCopy,
    10419  createInfo.pUserData,
    10420  suballocType,
    10421  strategy,
    10422  pAllocation);
    10423  if(res == VK_SUCCESS)
    10424  {
    10425  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10426  return VK_SUCCESS;
    10427  }
    10428  else
    10429  {
    10430  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10431  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10432  }
    10433  }
    10434  }
    10435  }
    10436 
    10437  // 3. Try to allocate from existing blocks with making other allocations lost.
    10438  if(canMakeOtherLost)
    10439  {
    10440  uint32_t tryIndex = 0;
    10441  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10442  {
    10443  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10444  VmaAllocationRequest bestRequest = {};
    10445  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10446 
    10447  // 1. Search existing allocations.
    10449  {
    10450  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10451  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10452  {
    10453  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10454  VMA_ASSERT(pCurrBlock);
    10455  VmaAllocationRequest currRequest = {};
    10456  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10457  currentFrameIndex,
    10458  m_FrameInUseCount,
    10459  m_BufferImageGranularity,
    10460  size,
    10461  alignment,
    10462  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10463  suballocType,
    10464  canMakeOtherLost,
    10465  strategy,
    10466  &currRequest))
    10467  {
    10468  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10469  if(pBestRequestBlock == VMA_NULL ||
    10470  currRequestCost < bestRequestCost)
    10471  {
    10472  pBestRequestBlock = pCurrBlock;
    10473  bestRequest = currRequest;
    10474  bestRequestCost = currRequestCost;
    10475 
    10476  if(bestRequestCost == 0)
    10477  {
    10478  break;
    10479  }
    10480  }
    10481  }
    10482  }
    10483  }
    10484  else // WORST_FIT, FIRST_FIT
    10485  {
    10486  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10487  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10488  {
    10489  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10490  VMA_ASSERT(pCurrBlock);
    10491  VmaAllocationRequest currRequest = {};
    10492  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10493  currentFrameIndex,
    10494  m_FrameInUseCount,
    10495  m_BufferImageGranularity,
    10496  size,
    10497  alignment,
    10498  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10499  suballocType,
    10500  canMakeOtherLost,
    10501  strategy,
    10502  &currRequest))
    10503  {
    10504  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10505  if(pBestRequestBlock == VMA_NULL ||
    10506  currRequestCost < bestRequestCost ||
    10508  {
    10509  pBestRequestBlock = pCurrBlock;
    10510  bestRequest = currRequest;
    10511  bestRequestCost = currRequestCost;
    10512 
    10513  if(bestRequestCost == 0 ||
    10515  {
    10516  break;
    10517  }
    10518  }
    10519  }
    10520  }
    10521  }
    10522 
    10523  if(pBestRequestBlock != VMA_NULL)
    10524  {
    10525  if(mapped)
    10526  {
    10527  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10528  if(res != VK_SUCCESS)
    10529  {
    10530  return res;
    10531  }
    10532  }
    10533 
    10534  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10535  currentFrameIndex,
    10536  m_FrameInUseCount,
    10537  &bestRequest))
    10538  {
    10539  // We no longer have an empty Allocation.
    10540  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10541  {
    10542  m_HasEmptyBlock = false;
    10543  }
    10544  // Allocate from this pBlock.
    10545  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10546  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10547  (*pAllocation)->InitBlockAllocation(
    10548  hCurrentPool,
    10549  pBestRequestBlock,
    10550  bestRequest.offset,
    10551  alignment,
    10552  size,
    10553  suballocType,
    10554  mapped,
    10555  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10556  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10557  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10558  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10559  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10560  {
    10561  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10562  }
    10563  if(IsCorruptionDetectionEnabled())
    10564  {
    10565  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10566  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10567  }
    10568  return VK_SUCCESS;
    10569  }
    10570  // else: Some allocations must have been touched while we are here. Next try.
    10571  }
    10572  else
    10573  {
    10574  // Could not find place in any of the blocks - break outer loop.
    10575  break;
    10576  }
    10577  }
    10578  /* Maximum number of tries exceeded - a very unlike event when many other
    10579  threads are simultaneously touching allocations making it impossible to make
    10580  lost at the same time as we try to allocate. */
    10581  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10582  {
    10583  return VK_ERROR_TOO_MANY_OBJECTS;
    10584  }
    10585  }
    10586 
    10587  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10588 }
    10589 
// Frees a block (sub)allocation: validates corruption margins if enabled,
// unmaps persistently-mapped allocations, returns the range to the block's
// metadata, and applies the "at most one empty block" heuristic. Actual
// destruction of an emptied block is deferred until after the mutex is
// released, for performance.
void VmaBlockVector::Free(
    VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Scope for lock.
    {
        VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        // Check the debug margins before releasing the range.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // Balance the Map() done when the allocation was created as persistently mapped.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        // NOTE(review): memTypeIndex is not in scope here; harmless only because
        // VMA_DEBUG_LOG expands to nothing by default - confirm before enabling logging.
        VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", memTypeIndex);

        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty Allocation. We don't want to have two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // We now have first empty block.
            else
            {
                m_HasEmptyBlock = true;
            }
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
                m_HasEmptyBlock = false;
            }
        }

        // One bubble-sort pass keeps m_Blocks roughly ordered by free space.
        IncrementallySortBlocks();
    }

    // Destruction of a free Allocation. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG("    Deleted empty allocation");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
    10657 
    10658 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10659 {
    10660  VkDeviceSize result = 0;
    10661  for(size_t i = m_Blocks.size(); i--; )
    10662  {
    10663  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10664  if(result >= m_PreferredBlockSize)
    10665  {
    10666  break;
    10667  }
    10668  }
    10669  return result;
    10670 }
    10671 
    10672 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10673 {
    10674  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10675  {
    10676  if(m_Blocks[blockIndex] == pBlock)
    10677  {
    10678  VmaVectorRemove(m_Blocks, blockIndex);
    10679  return;
    10680  }
    10681  }
    10682  VMA_ASSERT(0);
    10683 }
    10684 
    10685 void VmaBlockVector::IncrementallySortBlocks()
    10686 {
    10687  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10688  {
    10689  // Bubble sort only until first swap.
    10690  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10691  {
    10692  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10693  {
    10694  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10695  return;
    10696  }
    10697  }
    10698  }
    10699 }
    10700 
// Attempts a single allocation from one specific block, without making other
// allocations lost. On success creates and initializes *pAllocation; returns
// VK_ERROR_OUT_OF_DEVICE_MEMORY if the block cannot satisfy the request.
// Caller must hold m_Mutex.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags, // Must not contain CAN_MAKE_OTHER_LOST.
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    // Ask the block's metadata whether (and where) the request fits.
    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Persistently-mapped allocations take one mapping reference up front.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Commit the request and initialize the allocation object.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optional debug helpers: fill pattern and corruption-detection margins.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10775 
    10776 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10777 {
    10778  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10779  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10780  allocInfo.allocationSize = blockSize;
    10781  VkDeviceMemory mem = VK_NULL_HANDLE;
    10782  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10783  if(res < 0)
    10784  {
    10785  return res;
    10786  }
    10787 
    10788  // New VkDeviceMemory successfully created.
    10789 
    10790  // Create new Allocation for it.
    10791  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10792  pBlock->Init(
    10793  m_hAllocator,
    10794  m_MemoryTypeIndex,
    10795  mem,
    10796  allocInfo.allocationSize,
    10797  m_NextBlockId++,
    10798  m_Algorithm);
    10799 
    10800  m_Blocks.push_back(pBlock);
    10801  if(pNewBlockIndex != VMA_NULL)
    10802  {
    10803  *pNewBlockIndex = m_Blocks.size() - 1;
    10804  }
    10805 
    10806  return VK_SUCCESS;
    10807 }
    10808 
    10809 #if VMA_STATS_STRING_ENABLED
    10810 
// Serializes this block vector as a JSON object: pool parameters (custom pools
// emit their full configuration, default pools only the preferred block size)
// followed by a "Blocks" object keyed by block id. Holds m_Mutex throughout so
// the snapshot is consistent.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are written only when they constrain anything.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10873 
    10874 #endif // #if VMA_STATS_STRING_ENABLED
    10875 
    10876 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10877  VmaAllocator hAllocator,
    10878  uint32_t currentFrameIndex)
    10879 {
    10880  if(m_pDefragmentator == VMA_NULL)
    10881  {
    10882  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10883  hAllocator,
    10884  this,
    10885  currentFrameIndex);
    10886  }
    10887 
    10888  return m_pDefragmentator;
    10889 }
    10890 
// Runs the defragmentator within the given byte/allocation move budgets
// (both are in-out: decremented by what was consumed), accumulates results
// into *pDefragmentationStats if provided, then frees blocks that became
// empty (keeping m_MinBlockCount and updating the m_HasEmptyBlock flag).
// No-op returning VK_SUCCESS when no defragmentator was created.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Shrink the remaining budget for subsequent block vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove doesn't skip elements.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block to honor m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    10947 
    10948 void VmaBlockVector::DestroyDefragmentator()
    10949 {
    10950  if(m_pDefragmentator != VMA_NULL)
    10951  {
    10952  vma_delete(m_hAllocator, m_pDefragmentator);
    10953  m_pDefragmentator = VMA_NULL;
    10954  }
    10955 }
    10956 
    10957 void VmaBlockVector::MakePoolAllocationsLost(
    10958  uint32_t currentFrameIndex,
    10959  size_t* pLostAllocationCount)
    10960 {
    10961  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10962  size_t lostAllocationCount = 0;
    10963  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10964  {
    10965  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10966  VMA_ASSERT(pBlock);
    10967  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10968  }
    10969  if(pLostAllocationCount != VMA_NULL)
    10970  {
    10971  *pLostAllocationCount = lostAllocationCount;
    10972  }
    10973 }
    10974 
    10975 VkResult VmaBlockVector::CheckCorruption()
    10976 {
    10977  if(!IsCorruptionDetectionEnabled())
    10978  {
    10979  return VK_ERROR_FEATURE_NOT_PRESENT;
    10980  }
    10981 
    10982  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10983  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10984  {
    10985  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10986  VMA_ASSERT(pBlock);
    10987  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    10988  if(res != VK_SUCCESS)
    10989  {
    10990  return res;
    10991  }
    10992  }
    10993  return VK_SUCCESS;
    10994 }
    10995 
    10996 void VmaBlockVector::AddStats(VmaStats* pStats)
    10997 {
    10998  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    10999  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11000 
    11001  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11002 
    11003  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11004  {
    11005  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11006  VMA_ASSERT(pBlock);
    11007  VMA_HEAVY_ASSERT(pBlock->Validate());
    11008  VmaStatInfo allocationStatInfo;
    11009  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11010  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11011  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11012  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11013  }
    11014 }
    11015 
    11017 // VmaDefragmentator members definition
    11018 
// Binds the defragmentator to one block vector; move budgets start at zero
// and are accumulated during Defragment().
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Only block vectors using the default algorithm (0) support defragmentation.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11033 
    11034 VmaDefragmentator::~VmaDefragmentator()
    11035 {
    11036  for(size_t i = m_Blocks.size(); i--; )
    11037  {
    11038  vma_delete(m_hAllocator, m_Blocks[i]);
    11039  }
    11040 }
    11041 
    11042 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11043 {
    11044  AllocationInfo allocInfo;
    11045  allocInfo.m_hAllocation = hAlloc;
    11046  allocInfo.m_pChanged = pChanged;
    11047  m_Allocations.push_back(allocInfo);
    11048 }
    11049 
    11050 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11051 {
    11052  // It has already been mapped for defragmentation.
    11053  if(m_pMappedDataForDefragmentation)
    11054  {
    11055  *ppMappedData = m_pMappedDataForDefragmentation;
    11056  return VK_SUCCESS;
    11057  }
    11058 
    11059  // It is originally mapped.
    11060  if(m_pBlock->GetMappedData())
    11061  {
    11062  *ppMappedData = m_pBlock->GetMappedData();
    11063  return VK_SUCCESS;
    11064  }
    11065 
    11066  // Map on first usage.
    11067  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11068  *ppMappedData = m_pMappedDataForDefragmentation;
    11069  return res;
    11070 }
    11071 
    11072 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11073 {
    11074  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11075  {
    11076  m_pBlock->Unmap(hAllocator, 1);
    11077  }
    11078 }
    11079 
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // One pass of the defragmentation algorithm: repeatedly takes the most
    // "source-like" allocation and tries to relocate it into an earlier, more
    // "destination-like" block (or earlier within its own block).
    // Returns VK_SUCCESS when no more candidates remain, VK_INCOMPLETE when
    // one of the move budgets was exhausted, or a mapping error.
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX; // SIZE_MAX = "not chosen yet"; forces the search below.
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both blocks must be CPU-mapped to copy the allocation's data.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                if(VMA_DEBUG_MARGIN > 0)
                {
                    // Re-create the corruption-detection magic values around
                    // the allocation's new location.
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit the move: register at destination, free at source,
                // and repoint the allocation handle.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous (smaller) allocation, falling back to the
        // previous block when the current one is exhausted.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11223 
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Main entry point: distributes the allocations registered via
    // AddAllocation() into per-block lists, sorts blocks and allocations,
    // then runs up to two DefragmentRound() passes within the given budgets.
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value - enables the binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // The allocation's block is not in this vector - should be impossible.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11291 
    11292 bool VmaDefragmentator::MoveMakesSense(
    11293  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11294  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11295 {
    11296  if(dstBlockIndex < srcBlockIndex)
    11297  {
    11298  return true;
    11299  }
    11300  if(dstBlockIndex > srcBlockIndex)
    11301  {
    11302  return false;
    11303  }
    11304  if(dstOffset < srcOffset)
    11305  {
    11306  return true;
    11307  }
    11308  return false;
    11309 }
    11310 
    11312 // VmaRecorder
    11313 
    11314 #if VMA_RECORDING_ENABLED
    11315 
// Constructs an inactive recorder; real setup (timer calibration, opening
// the output file) happens in Init().
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11324 
    11325 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    11326 {
    11327  m_UseMutex = useMutex;
    11328  m_Flags = settings.flags;
    11329 
    11330  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    11331  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    11332 
    11333  // Open file for writing.
    11334  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    11335  if(err != 0)
    11336  {
    11337  return VK_ERROR_INITIALIZATION_FAILED;
    11338  }
    11339 
    11340  // Write header.
    11341  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    11342  fprintf(m_File, "%s\n", "1,3");
    11343 
    11344  return VK_SUCCESS;
    11345 }
    11346 
    11347 VmaRecorder::~VmaRecorder()
    11348 {
    11349  if(m_File != VMA_NULL)
    11350  {
    11351  fclose(m_File);
    11352  }
    11353 }
    11354 
// Writes a "vmaCreateAllocator" line to the recording file.
// Line layout (CSV) is a serialized format consumed by the replay tool -
// do not alter the format string.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11364 
// Writes a "vmaDestroyAllocator" line to the recording file.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11374 
// Writes a "vmaCreatePool" line: pool creation parameters followed by the
// resulting pool handle. Column order must match the format string exactly.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11391 
// Writes a "vmaDestroyPool" line identifying the destroyed pool by handle.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11402 
// Writes a "vmaAllocateMemory" line: memory requirements, allocation create
// info, the resulting allocation handle, and the user-data string.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11427 
// Writes a "vmaAllocateMemoryForBuffer" line; like RecordAllocateMemory plus
// the dedicated-allocation flags (encoded as 0/1).
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11456 
// Writes a "vmaAllocateMemoryForImage" line; same columns as the buffer
// variant, only the call name differs.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11485 
// Writes a "vmaFreeMemory" line identifying the freed allocation by handle.
void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11497 
// Writes a "vmaSetAllocationUserData" line. The user data is rendered either
// as the string it points to (when the allocation stores a copied string) or
// as a raw pointer value.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11514 
// Writes a "vmaCreateLostAllocation" line for the new lost-allocation handle.
void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11526 
// Writes a "vmaMapMemory" line for the mapped allocation.
void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11538 
// Writes a "vmaUnmapMemory" line for the unmapped allocation.
void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11550 
// Writes a "vmaFlushAllocation" line: allocation handle plus flushed range.
void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11564 
// Writes a "vmaInvalidateAllocation" line: allocation handle plus range.
void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11578 
// Writes a "vmaCreateBuffer" line: buffer create info, allocation create
// info, resulting allocation handle, and user-data string.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11604 
// Writes a "vmaCreateImage" line: the full image create info, allocation
// create info, resulting allocation handle, and user-data string.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11639 
// Writes a "vmaDestroyBuffer" line identifying the allocation by handle.
void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11651 
// Writes a "vmaDestroyImage" line identifying the allocation by handle.
void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11663 
// Writes a "vmaTouchAllocation" line for the touched allocation.
void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11675 
// Writes a "vmaGetAllocationInfo" line for the queried allocation.
void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11687 
// Writes a "vmaMakePoolAllocationsLost" line identifying the pool by handle.
void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11699 
    11700 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11701 {
    11702  if(pUserData != VMA_NULL)
    11703  {
    11704  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11705  {
    11706  m_Str = (const char*)pUserData;
    11707  }
    11708  else
    11709  {
    11710  sprintf_s(m_PtrStr, "%p", pUserData);
    11711  m_Str = m_PtrStr;
    11712  }
    11713  }
    11714  else
    11715  {
    11716  m_Str = "";
    11717  }
    11718 }
    11719 
// Dumps the environment (device properties/limits, memory heaps and types,
// enabled extensions, and compile-time VMA macros) into the recording file
// as a "Config,Begin" ... "Config,End" section, so the replay tool can
// detect mismatches with the machine it replays on.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Physical device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that influence allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps and types of the device.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time VMA configuration macros active in this build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11765 
    11766 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11767 {
    11768  outParams.threadId = GetCurrentThreadId();
    11769 
    11770  LARGE_INTEGER counter;
    11771  QueryPerformanceCounter(&counter);
    11772  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11773 }
    11774 
    11775 void VmaRecorder::Flush()
    11776 {
    11777  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11778  {
    11779  fflush(m_File);
    11780  }
    11781 }
    11782 
    11783 #endif // #if VMA_RECORDING_ENABLED
    11784 
    11786 // VmaAllocator_T
    11787 
    11788 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    11789  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    11790  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    11791  m_hDevice(pCreateInfo->device),
    11792  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    11793  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    11794  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    11795  m_PreferredLargeHeapBlockSize(0),
    11796  m_PhysicalDevice(pCreateInfo->physicalDevice),
    11797  m_CurrentFrameIndex(0),
    11798  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    11799  m_NextPoolId(0)
    11801  ,m_pRecorder(VMA_NULL)
    11802 #endif
    11803 {
    11804  if(VMA_DEBUG_DETECT_CORRUPTION)
    11805  {
    11806  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    11807  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    11808  }
    11809 
    11810  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    11811 
    11812 #if !(VMA_DEDICATED_ALLOCATION)
    11814  {
    11815  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    11816  }
    11817 #endif
    11818 
    11819  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    11820  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    11821  memset(&m_MemProps, 0, sizeof(m_MemProps));
    11822 
    11823  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    11824  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    11825 
    11826  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    11827  {
    11828  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    11829  }
    11830 
    11831  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    11832  {
    11833  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    11834  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    11835  }
    11836 
    11837  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    11838 
    11839  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    11840  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    11841 
    11842  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    11843  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    11844  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    11845  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    11846 
    11847  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    11848  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11849 
    11850  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    11851  {
    11852  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    11853  {
    11854  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    11855  if(limit != VK_WHOLE_SIZE)
    11856  {
    11857  m_HeapSizeLimit[heapIndex] = limit;
    11858  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    11859  {
    11860  m_MemProps.memoryHeaps[heapIndex].size = limit;
    11861  }
    11862  }
    11863  }
    11864  }
    11865 
    11866  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    11867  {
    11868  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    11869 
    11870  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    11871  this,
    11872  memTypeIndex,
    11873  preferredBlockSize,
    11874  0,
    11875  SIZE_MAX,
    11876  GetBufferImageGranularity(),
    11877  pCreateInfo->frameInUseCount,
    11878  false, // isCustomPool
    11879  false, // explicitBlockSize
    11880  false); // linearAlgorithm
    11881  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    11882  // becase minBlockCount is 0.
    11883  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    11884 
    11885  }
    11886 }
    11887 
    11888 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    11889 {
    11890  VkResult res = VK_SUCCESS;
    11891 
    11892  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    11893  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    11894  {
    11895 #if VMA_RECORDING_ENABLED
    11896  m_pRecorder = vma_new(this, VmaRecorder)();
    11897  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    11898  if(res != VK_SUCCESS)
    11899  {
    11900  return res;
    11901  }
    11902  m_pRecorder->WriteConfiguration(
    11903  m_PhysicalDeviceProperties,
    11904  m_MemProps,
    11905  m_UseKhrDedicatedAllocation);
    11906  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    11907 #else
    11908  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    11909  return VK_ERROR_FEATURE_NOT_PRESENT;
    11910 #endif
    11911  }
    11912 
    11913  return res;
    11914 }
    11915 
    11916 VmaAllocator_T::~VmaAllocator_T()
    11917 {
    11918 #if VMA_RECORDING_ENABLED
    11919  if(m_pRecorder != VMA_NULL)
    11920  {
    11921  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    11922  vma_delete(this, m_pRecorder);
    11923  }
    11924 #endif
    11925 
    11926  VMA_ASSERT(m_Pools.empty());
    11927 
    11928  for(size_t i = GetMemoryTypeCount(); i--; )
    11929  {
    11930  vma_delete(this, m_pDedicatedAllocations[i]);
    11931  vma_delete(this, m_pBlockVectors[i]);
    11932  }
    11933 }
    11934 
// Populates m_VulkanFunctions in three stages:
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, take addresses of the statically
//    linked Vulkan entry points (extension functions via vkGetDeviceProcAddr).
// 2. Override individual pointers with any non-null entries the user supplied
//    in pVulkanFunctions (may be null as a whole).
// 3. Assert that every pointer required for operation ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // The KHR extension functions are not exported statically; fetch them from
    // the device only when the extension was enabled by the user.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a single function pointer from the user struct, keeping the existing
// (possibly statically-imported) value when the user left it null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    // Extension function pointers are only required when the extension is in use.
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12020 
    12021 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12022 {
    12023  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12024  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12025  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12026  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12027 }
    12028 
    12029 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12030  VkDeviceSize size,
    12031  VkDeviceSize alignment,
    12032  bool dedicatedAllocation,
    12033  VkBuffer dedicatedBuffer,
    12034  VkImage dedicatedImage,
    12035  const VmaAllocationCreateInfo& createInfo,
    12036  uint32_t memTypeIndex,
    12037  VmaSuballocationType suballocType,
    12038  VmaAllocation* pAllocation)
    12039 {
    12040  VMA_ASSERT(pAllocation != VMA_NULL);
    12041  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12042 
    12043  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12044 
    12045  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12046  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12047  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12048  {
    12049  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12050  }
    12051 
    12052  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12053  VMA_ASSERT(blockVector);
    12054 
    12055  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12056  bool preferDedicatedMemory =
    12057  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12058  dedicatedAllocation ||
    12059  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12060  size > preferredBlockSize / 2;
    12061 
    12062  if(preferDedicatedMemory &&
    12063  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12064  finalCreateInfo.pool == VK_NULL_HANDLE)
    12065  {
    12067  }
    12068 
    12069  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12070  {
    12071  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12072  {
    12073  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12074  }
    12075  else
    12076  {
    12077  return AllocateDedicatedMemory(
    12078  size,
    12079  suballocType,
    12080  memTypeIndex,
    12081  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12082  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12083  finalCreateInfo.pUserData,
    12084  dedicatedBuffer,
    12085  dedicatedImage,
    12086  pAllocation);
    12087  }
    12088  }
    12089  else
    12090  {
    12091  VkResult res = blockVector->Allocate(
    12092  VK_NULL_HANDLE, // hCurrentPool
    12093  m_CurrentFrameIndex.load(),
    12094  size,
    12095  alignment,
    12096  finalCreateInfo,
    12097  suballocType,
    12098  pAllocation);
    12099  if(res == VK_SUCCESS)
    12100  {
    12101  return res;
    12102  }
    12103 
    12104  // 5. Try dedicated memory.
    12105  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12106  {
    12107  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12108  }
    12109  else
    12110  {
    12111  res = AllocateDedicatedMemory(
    12112  size,
    12113  suballocType,
    12114  memTypeIndex,
    12115  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12116  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12117  finalCreateInfo.pUserData,
    12118  dedicatedBuffer,
    12119  dedicatedImage,
    12120  pAllocation);
    12121  if(res == VK_SUCCESS)
    12122  {
    12123  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12124  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12125  return VK_SUCCESS;
    12126  }
    12127  else
    12128  {
    12129  // Everything failed: Return error code.
    12130  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12131  return res;
    12132  }
    12133  }
    12134  }
    12135 }
    12136 
// Creates a dedicated VkDeviceMemory allocation for a single resource:
// allocates the device memory (chaining VkMemoryDedicatedAllocateInfoKHR when
// the extension is in use), optionally maps it persistently, wraps it in a
// VmaAllocation_T, and registers it in m_pDedicatedAllocations[memTypeIndex].
// Returns the VkResult of the failing Vulkan call on error.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain the dedicated-allocation info for the buffer or image this memory
    // is dedicated to. At most one of the two handles may be non-null.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Optionally map the whole allocation persistently.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the just-allocated memory before returning.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12219 
// Queries memory requirements for a buffer. When VK_KHR_dedicated_allocation
// is enabled, uses vkGetBufferMemoryRequirements2KHR and also reports whether
// the implementation requires/prefers a dedicated allocation for this buffer;
// otherwise falls back to the core function and reports both flags as false.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        // Chained output struct that receives the dedicated-allocation hints.
        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12251 
// Image counterpart of GetBufferMemoryRequirements: queries memory
// requirements via vkGetImageMemoryRequirements2KHR (with dedicated-allocation
// hints) when the extension is enabled, else via the core function with both
// dedicated-allocation flags reported as false.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        // Chained output struct that receives the dedicated-allocation hints.
        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12283 
    12284 VkResult VmaAllocator_T::AllocateMemory(
    12285  const VkMemoryRequirements& vkMemReq,
    12286  bool requiresDedicatedAllocation,
    12287  bool prefersDedicatedAllocation,
    12288  VkBuffer dedicatedBuffer,
    12289  VkImage dedicatedImage,
    12290  const VmaAllocationCreateInfo& createInfo,
    12291  VmaSuballocationType suballocType,
    12292  VmaAllocation* pAllocation)
    12293 {
    12294  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12295 
    12296  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12297  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12298  {
    12299  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12300  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12301  }
    12302  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12304  {
    12305  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12306  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12307  }
    12308  if(requiresDedicatedAllocation)
    12309  {
    12310  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12311  {
    12312  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12313  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12314  }
    12315  if(createInfo.pool != VK_NULL_HANDLE)
    12316  {
    12317  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12318  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12319  }
    12320  }
    12321  if((createInfo.pool != VK_NULL_HANDLE) &&
    12322  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12323  {
    12324  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12325  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12326  }
    12327 
    12328  if(createInfo.pool != VK_NULL_HANDLE)
    12329  {
    12330  const VkDeviceSize alignmentForPool = VMA_MAX(
    12331  vkMemReq.alignment,
    12332  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12333  return createInfo.pool->m_BlockVector.Allocate(
    12334  createInfo.pool,
    12335  m_CurrentFrameIndex.load(),
    12336  vkMemReq.size,
    12337  alignmentForPool,
    12338  createInfo,
    12339  suballocType,
    12340  pAllocation);
    12341  }
    12342  else
    12343  {
    12344  // Bit mask of memory Vulkan types acceptable for this allocation.
    12345  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12346  uint32_t memTypeIndex = UINT32_MAX;
    12347  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12348  if(res == VK_SUCCESS)
    12349  {
    12350  VkDeviceSize alignmentForMemType = VMA_MAX(
    12351  vkMemReq.alignment,
    12352  GetMemoryTypeMinAlignment(memTypeIndex));
    12353 
    12354  res = AllocateMemoryOfType(
    12355  vkMemReq.size,
    12356  alignmentForMemType,
    12357  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12358  dedicatedBuffer,
    12359  dedicatedImage,
    12360  createInfo,
    12361  memTypeIndex,
    12362  suballocType,
    12363  pAllocation);
    12364  // Succeeded on first try.
    12365  if(res == VK_SUCCESS)
    12366  {
    12367  return res;
    12368  }
    12369  // Allocation from this memory type failed. Try other compatible memory types.
    12370  else
    12371  {
    12372  for(;;)
    12373  {
    12374  // Remove old memTypeIndex from list of possibilities.
    12375  memoryTypeBits &= ~(1u << memTypeIndex);
    12376  // Find alternative memTypeIndex.
    12377  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12378  if(res == VK_SUCCESS)
    12379  {
    12380  alignmentForMemType = VMA_MAX(
    12381  vkMemReq.alignment,
    12382  GetMemoryTypeMinAlignment(memTypeIndex));
    12383 
    12384  res = AllocateMemoryOfType(
    12385  vkMemReq.size,
    12386  alignmentForMemType,
    12387  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12388  dedicatedBuffer,
    12389  dedicatedImage,
    12390  createInfo,
    12391  memTypeIndex,
    12392  suballocType,
    12393  pAllocation);
    12394  // Allocation from this alternative memory type succeeded.
    12395  if(res == VK_SUCCESS)
    12396  {
    12397  return res;
    12398  }
    12399  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12400  }
    12401  // No other matching memory type index could be found.
    12402  else
    12403  {
    12404  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12405  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12406  }
    12407  }
    12408  }
    12409  }
    12410  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12411  else
    12412  return res;
    12413  }
    12414 }
    12415 
// Frees an allocation previously returned by this allocator. The underlying
// memory is released only when TouchAllocation succeeds (NOTE(review):
// presumably it returns false for allocations that became lost — confirm
// against TouchAllocation's definition elsewhere in this file); the metadata
// object itself is always destroyed.
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    if(TouchAllocation(allocation))
    {
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Overwrite the memory with the "destroyed" pattern to help catch
            // use-after-free in debug configurations.
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Return the suballocation to its owning block vector: the
                // custom pool's vector if the allocation came from a pool,
                // otherwise the default vector for its memory type.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Destroy the allocation metadata object regardless of how (or whether)
    // the underlying memory was freed.
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
    12456 
    12457 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12458 {
    12459  // Initialize.
    12460  InitStatInfo(pStats->total);
    12461  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12462  InitStatInfo(pStats->memoryType[i]);
    12463  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12464  InitStatInfo(pStats->memoryHeap[i]);
    12465 
    12466  // Process default pools.
    12467  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12468  {
    12469  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12470  VMA_ASSERT(pBlockVector);
    12471  pBlockVector->AddStats(pStats);
    12472  }
    12473 
    12474  // Process custom pools.
    12475  {
    12476  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12477  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12478  {
    12479  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12480  }
    12481  }
    12482 
    12483  // Process dedicated allocations.
    12484  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12485  {
    12486  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12487  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12488  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12489  VMA_ASSERT(pDedicatedAllocVector);
    12490  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12491  {
    12492  VmaStatInfo allocationStatInfo;
    12493  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12494  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12495  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12496  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12497  }
    12498  }
    12499 
    12500  // Postprocess.
    12501  VmaPostprocessCalcStatInfo(pStats->total);
    12502  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12503  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12504  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12505  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12506 }
    12507 
    12508 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12509 
    12510 VkResult VmaAllocator_T::Defragment(
    12511  VmaAllocation* pAllocations,
    12512  size_t allocationCount,
    12513  VkBool32* pAllocationsChanged,
    12514  const VmaDefragmentationInfo* pDefragmentationInfo,
    12515  VmaDefragmentationStats* pDefragmentationStats)
    12516 {
    12517  if(pAllocationsChanged != VMA_NULL)
    12518  {
    12519  memset(pAllocationsChanged, 0, sizeof(*pAllocationsChanged));
    12520  }
    12521  if(pDefragmentationStats != VMA_NULL)
    12522  {
    12523  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12524  }
    12525 
    12526  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12527 
    12528  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12529 
    12530  const size_t poolCount = m_Pools.size();
    12531 
    12532  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12533  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12534  {
    12535  VmaAllocation hAlloc = pAllocations[allocIndex];
    12536  VMA_ASSERT(hAlloc);
    12537  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12538  // DedicatedAlloc cannot be defragmented.
    12539  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12540  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12541  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12542  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12543  // Lost allocation cannot be defragmented.
    12544  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12545  {
    12546  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12547 
    12548  const VmaPool hAllocPool = hAlloc->GetPool();
    12549  // This allocation belongs to custom pool.
    12550  if(hAllocPool != VK_NULL_HANDLE)
    12551  {
    12552  // Pools with linear or buddy algorithm are not defragmented.
    12553  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12554  {
    12555  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12556  }
    12557  }
    12558  // This allocation belongs to general pool.
    12559  else
    12560  {
    12561  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12562  }
    12563 
    12564  if(pAllocBlockVector != VMA_NULL)
    12565  {
    12566  VmaDefragmentator* const pDefragmentator =
    12567  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12568  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12569  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12570  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12571  }
    12572  }
    12573  }
    12574 
    12575  VkResult result = VK_SUCCESS;
    12576 
    12577  // ======== Main processing.
    12578 
    12579  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12580  uint32_t maxAllocationsToMove = UINT32_MAX;
    12581  if(pDefragmentationInfo != VMA_NULL)
    12582  {
    12583  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12584  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12585  }
    12586 
    12587  // Process standard memory.
    12588  for(uint32_t memTypeIndex = 0;
    12589  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12590  ++memTypeIndex)
    12591  {
    12592  // Only HOST_VISIBLE memory types can be defragmented.
    12593  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12594  {
    12595  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12596  pDefragmentationStats,
    12597  maxBytesToMove,
    12598  maxAllocationsToMove);
    12599  }
    12600  }
    12601 
    12602  // Process custom pools.
    12603  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12604  {
    12605  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12606  pDefragmentationStats,
    12607  maxBytesToMove,
    12608  maxAllocationsToMove);
    12609  }
    12610 
    12611  // ======== Destroy defragmentators.
    12612 
    12613  // Process custom pools.
    12614  for(size_t poolIndex = poolCount; poolIndex--; )
    12615  {
    12616  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12617  }
    12618 
    12619  // Process standard memory.
    12620  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12621  {
    12622  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12623  {
    12624  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12625  }
    12626  }
    12627 
    12628  return result;
    12629 }
    12630 
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    // Fills *pAllocationInfo with the allocation's current parameters.
    // For allocations that can become lost, this also atomically stamps the
    // allocation's last-use frame index with the current frame ("touches" it),
    // unless the allocation is already lost.
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Lock-free CAS loop: terminates when the allocation is observed lost,
        // observed already stamped with the current frame, or we stamp it.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Lost: no backing memory to report; size and pUserData
                // are still returned.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report real parameters.
                // pMappedData is reported as VMA_NULL on this path.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the stamp; on failure another thread changed
                // it and the loop re-examines the freshly observed value.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds also stamp non-lost allocations with the current
        // frame index; such an allocation can never be in the lost state.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12702 
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo:
    // returns false if the allocation is lost, otherwise stamps it with the
    // current frame index and returns true, without filling any info struct.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Same lock-free CAS scheme as in GetAllocationInfo().
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds also stamp non-lost allocations; such an
        // allocation can never be in the lost state.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Allocations that cannot become lost are always "touchable".
        return true;
    }
}
    12754 
VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
{
    // Creates a custom pool, preallocates its minimum block count, and
    // registers it in m_Pools. On failure *pPool is reset to VMA_NULL and
    // an error code is returned.
    VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);

    VmaPoolCreateInfo newCreateInfo = *pCreateInfo;

    // maxBlockCount == 0 means "no limit".
    if(newCreateInfo.maxBlockCount == 0)
    {
        newCreateInfo.maxBlockCount = SIZE_MAX;
    }
    if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);

    *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);

    // Preallocate minBlockCount blocks; roll the pool object back on failure.
    VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    if(res != VK_SUCCESS)
    {
        vma_delete(this, *pPool);
        *pPool = VMA_NULL;
        return res;
    }

    // Add to m_Pools, assigning the next sequential pool id under the mutex.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        (*pPool)->SetId(m_NextPoolId++);
        VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    }

    return VK_SUCCESS;
}
    12791 
    12792 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12793 {
    12794  // Remove from m_Pools.
    12795  {
    12796  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12797  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    12798  VMA_ASSERT(success && "Pool not found in Allocator.");
    12799  }
    12800 
    12801  vma_delete(this, pool);
    12802 }
    12803 
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    // Thin forwarder: pool statistics are tracked by the pool's block vector.
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    12808 
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    // Atomically publishes the new frame index consumed by the
    // lost-allocation logic (GetAllocationInfo/TouchAllocation).
    m_CurrentFrameIndex.store(frameIndex);
}
    12813 
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    // Forwards to the pool's block vector, passing the current frame index.
    // pLostAllocationCount (optional) receives the number of allocations lost.
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    12822 
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    // Thin forwarder: corruption detection is implemented by the block vector.
    return hPool->m_BlockVector.CheckCorruption();
}
    12827 
    12828 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12829 {
    12830  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12831 
    12832  // Process default pools.
    12833  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12834  {
    12835  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12836  {
    12837  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12838  VMA_ASSERT(pBlockVector);
    12839  VkResult localRes = pBlockVector->CheckCorruption();
    12840  switch(localRes)
    12841  {
    12842  case VK_ERROR_FEATURE_NOT_PRESENT:
    12843  break;
    12844  case VK_SUCCESS:
    12845  finalRes = VK_SUCCESS;
    12846  break;
    12847  default:
    12848  return localRes;
    12849  }
    12850  }
    12851  }
    12852 
    12853  // Process custom pools.
    12854  {
    12855  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12856  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12857  {
    12858  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12859  {
    12860  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12861  switch(localRes)
    12862  {
    12863  case VK_ERROR_FEATURE_NOT_PRESENT:
    12864  break;
    12865  case VK_SUCCESS:
    12866  finalRes = VK_SUCCESS;
    12867  break;
    12868  default:
    12869  return localRes;
    12870  }
    12871  }
    12872  }
    12873  }
    12874 
    12875  return finalRes;
    12876 }
    12877 
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    // Creates a dummy allocation object that starts out in the lost state
    // (constructed with frame index VMA_FRAME_INDEX_LOST, then InitLost()).
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    12883 
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    // Wraps vkAllocateMemory: enforces the optional per-heap size limit and
    // invokes the user's allocate callback on success.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit" for this heap.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Check and charge the remaining budget atomically under the mutex.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Budget is charged only when the allocation succeeded.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Budget exhausted: simulate an out-of-memory condition without
            // calling the driver.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify the user callback about a successful device memory allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    12917 
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Wraps vkFreeMemory. Order matters: the user's free callback runs first,
    // while hMemory is still a valid handle; then the memory is freed; then
    // `size` is returned to the per-heap budget.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    // VK_WHOLE_SIZE means this heap has no artificial size limit.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    12934 
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    // Maps the allocation's memory and returns, in *ppData, a pointer adjusted
    // by the allocation's offset within its block. Each successful Map() must
    // be paired with an Unmap() call.
    if(hAllocation->CanBecomeLost())
    {
        // Mapping of allocations that can become lost is not supported.
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                // Sub-allocation: offset the block's base mapping pointer.
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
    12963 
    12964 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    12965 {
    12966  switch(hAllocation->GetType())
    12967  {
    12968  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12969  {
    12970  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12971  hAllocation->BlockAllocUnmap();
    12972  pBlock->Unmap(this, 1);
    12973  }
    12974  break;
    12975  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12976  hAllocation->DedicatedAllocUnmap(this);
    12977  break;
    12978  default:
    12979  VMA_ASSERT(0);
    12980  }
    12981 }
    12982 
VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
{
    // Binds hBuffer to the allocation's device memory at the proper offset.
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        // A dedicated allocation owns its whole VkDeviceMemory: bind at 0.
        res = GetVulkanFunctions().vkBindBufferMemory(
            m_hDevice,
            hBuffer,
            hAllocation->GetMemory(),
            0); //memoryOffset
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        // The block applies the sub-allocation's offset itself.
        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
        VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
        res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
        break;
    }
    default:
        // Unknown type: asserts in debug builds; note that res remains
        // VK_SUCCESS in release builds.
        VMA_ASSERT(0);
    }
    return res;
}
    13007 
VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
{
    // Binds hImage to the allocation's device memory at the proper offset.
    // Mirrors BindBufferMemory above.
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        // A dedicated allocation owns its whole VkDeviceMemory: bind at 0.
        res = GetVulkanFunctions().vkBindImageMemory(
            m_hDevice,
            hImage,
            hAllocation->GetMemory(),
            0); //memoryOffset
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        // The block applies the sub-allocation's offset itself.
        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
        VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
        res = pBlock->BindImageMemory(this, hAllocation, hImage);
        break;
    }
    default:
        // Unknown type: asserts in debug builds; note that res remains
        // VK_SUCCESS in release builds.
        VMA_ASSERT(0);
    }
    return res;
}
    13032 
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    // Flushes or invalidates the range [offset, offset+size) of the
    // allocation. size == VK_WHOLE_SIZE means "to the end of the allocation".
    // A no-op when size == 0 or the memory type is HOST_COHERENT.
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        // Mapped-memory ranges must be aligned to nonCoherentAtomSize.
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align start down; clamp the aligned-up size so the range does
            // not run past the end of the allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block: translate to block-relative offset
            // and clamp the range to the block's size.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13108 
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    // Unregisters a dedicated allocation from its per-memory-type list and
    // releases its VkDeviceMemory.
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    // Remove from the sorted dedicated-allocation vector under its mutex.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    13138 
void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
{
    // Debug helper: fills the allocation's memory with `pattern` when
    // VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled. Only applies to
    // host-visible allocations that cannot become lost.
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
        !hAllocation->CanBecomeLost() &&
        (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        void* pData = VMA_NULL;
        VkResult res = Map(hAllocation, &pData);
        if(res == VK_SUCCESS)
        {
            memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
            // Flush so the pattern lands in memory even for non-coherent types.
            FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
            Unmap(hAllocation);
        }
        else
        {
            VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
        }
    }
}
    13159 
    13160 #if VMA_STATS_STRING_ENABLED
    13161 
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Appends detailed JSON sections for dedicated allocations, default pools
    // and custom pools. Each section header is emitted lazily, only when the
    // corresponding data is non-empty.

    // "DedicatedAllocations": one array per non-empty memory type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools": one entry per memory type with a non-empty block vector.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // "Pools": custom pools keyed by their numeric id, under the pools mutex.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13247 
    13248 #endif // #if VMA_STATS_STRING_ENABLED
    13249 
    13251 // Public interface
    13252 
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    // Public entry point: constructs the allocator object with the caller's
    // allocation callbacks, then runs the second-stage Init() whose result
    // is returned to the caller.
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13262 
    13263 void vmaDestroyAllocator(
    13264  VmaAllocator allocator)
    13265 {
    13266  if(allocator != VK_NULL_HANDLE)
    13267  {
    13268  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13269  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13270  vma_delete(&allocationCallbacks, allocator);
    13271  }
    13272 }
    13273 
    13275  VmaAllocator allocator,
    13276  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13277 {
    13278  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13279  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13280 }
    13281 
    13283  VmaAllocator allocator,
    13284  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13285 {
    13286  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13287  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13288 }
    13289 
    13291  VmaAllocator allocator,
    13292  uint32_t memoryTypeIndex,
    13293  VkMemoryPropertyFlags* pFlags)
    13294 {
    13295  VMA_ASSERT(allocator && pFlags);
    13296  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13297  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13298 }
    13299 
    13301  VmaAllocator allocator,
    13302  uint32_t frameIndex)
    13303 {
    13304  VMA_ASSERT(allocator);
    13305  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13306 
    13307  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13308 
    13309  allocator->SetCurrentFrameIndex(frameIndex);
    13310 }
    13311 
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    // Public entry point: forwards to VmaAllocator_T::CalculateStats to fill
    // *pStats with aggregated statistics.
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13320 
    13321 #if VMA_STATS_STRING_ENABLED
    13322 
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    // Builds a JSON string describing the current allocator state and returns
    // it in *ppStatsString. The string is allocated via the allocator and must
    // be released with vmaFreeStatsString().
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap...
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap stats only when the heap actually contains blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // ...containing a nested object per memory type in that heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        // Optional per-allocation dump.
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's buffer into a NUL-terminated array owned by the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13430 
    13431 void vmaFreeStatsString(
    13432  VmaAllocator allocator,
    13433  char* pStatsString)
    13434 {
    13435  if(pStatsString != VMA_NULL)
    13436  {
    13437  VMA_ASSERT(allocator);
    13438  size_t len = strlen(pStatsString);
    13439  vma_delete_array(allocator, pStatsString, len + 1);
    13440  }
    13441 }
    13442 
    13443 #endif // #if VMA_STATS_STRING_ENABLED
    13444 
    13445 /*
    13446 This function is not protected by any mutex because it just reads immutable data.
    13447 */
    13448 VkResult vmaFindMemoryTypeIndex(
    13449  VmaAllocator allocator,
    13450  uint32_t memoryTypeBits,
    13451  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13452  uint32_t* pMemoryTypeIndex)
    13453 {
    13454  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13455  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13456  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13457 
    13458  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13459  {
    13460  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13461  }
    13462 
    13463  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13464  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13465 
    13466  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13467  if(mapped)
    13468  {
    13469  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13470  }
    13471 
    13472  // Convert usage to requiredFlags and preferredFlags.
    13473  switch(pAllocationCreateInfo->usage)
    13474  {
    13476  break;
    13478  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13479  {
    13480  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13481  }
    13482  break;
    13484  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13485  break;
    13487  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13488  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13489  {
    13490  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13491  }
    13492  break;
    13494  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13495  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13496  break;
    13497  default:
    13498  break;
    13499  }
    13500 
    13501  *pMemoryTypeIndex = UINT32_MAX;
    13502  uint32_t minCost = UINT32_MAX;
    13503  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13504  memTypeIndex < allocator->GetMemoryTypeCount();
    13505  ++memTypeIndex, memTypeBit <<= 1)
    13506  {
    13507  // This memory type is acceptable according to memoryTypeBits bitmask.
    13508  if((memTypeBit & memoryTypeBits) != 0)
    13509  {
    13510  const VkMemoryPropertyFlags currFlags =
    13511  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13512  // This memory type contains requiredFlags.
    13513  if((requiredFlags & ~currFlags) == 0)
    13514  {
    13515  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13516  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13517  // Remember memory type with lowest cost.
    13518  if(currCost < minCost)
    13519  {
    13520  *pMemoryTypeIndex = memTypeIndex;
    13521  if(currCost == 0)
    13522  {
    13523  return VK_SUCCESS;
    13524  }
    13525  minCost = currCost;
    13526  }
    13527  }
    13528  }
    13529  }
    13530  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13531 }
    13532 
    13534  VmaAllocator allocator,
    13535  const VkBufferCreateInfo* pBufferCreateInfo,
    13536  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13537  uint32_t* pMemoryTypeIndex)
    13538 {
    13539  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13540  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13541  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13542  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13543 
    13544  const VkDevice hDev = allocator->m_hDevice;
    13545  VkBuffer hBuffer = VK_NULL_HANDLE;
    13546  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13547  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13548  if(res == VK_SUCCESS)
    13549  {
    13550  VkMemoryRequirements memReq = {};
    13551  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13552  hDev, hBuffer, &memReq);
    13553 
    13554  res = vmaFindMemoryTypeIndex(
    13555  allocator,
    13556  memReq.memoryTypeBits,
    13557  pAllocationCreateInfo,
    13558  pMemoryTypeIndex);
    13559 
    13560  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13561  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13562  }
    13563  return res;
    13564 }
    13565 
    13567  VmaAllocator allocator,
    13568  const VkImageCreateInfo* pImageCreateInfo,
    13569  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13570  uint32_t* pMemoryTypeIndex)
    13571 {
    13572  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13573  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13574  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13575  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13576 
    13577  const VkDevice hDev = allocator->m_hDevice;
    13578  VkImage hImage = VK_NULL_HANDLE;
    13579  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13580  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13581  if(res == VK_SUCCESS)
    13582  {
    13583  VkMemoryRequirements memReq = {};
    13584  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13585  hDev, hImage, &memReq);
    13586 
    13587  res = vmaFindMemoryTypeIndex(
    13588  allocator,
    13589  memReq.memoryTypeBits,
    13590  pAllocationCreateInfo,
    13591  pMemoryTypeIndex);
    13592 
    13593  allocator->GetVulkanFunctions().vkDestroyImage(
    13594  hDev, hImage, allocator->GetAllocationCallbacks());
    13595  }
    13596  return res;
    13597 }
    13598 
    13599 VkResult vmaCreatePool(
    13600  VmaAllocator allocator,
    13601  const VmaPoolCreateInfo* pCreateInfo,
    13602  VmaPool* pPool)
    13603 {
    13604  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13605 
    13606  VMA_DEBUG_LOG("vmaCreatePool");
    13607 
    13608  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13609 
    13610  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13611 
    13612 #if VMA_RECORDING_ENABLED
    13613  if(allocator->GetRecorder() != VMA_NULL)
    13614  {
    13615  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13616  }
    13617 #endif
    13618 
    13619  return res;
    13620 }
    13621 
    13622 void vmaDestroyPool(
    13623  VmaAllocator allocator,
    13624  VmaPool pool)
    13625 {
    13626  VMA_ASSERT(allocator);
    13627 
    13628  if(pool == VK_NULL_HANDLE)
    13629  {
    13630  return;
    13631  }
    13632 
    13633  VMA_DEBUG_LOG("vmaDestroyPool");
    13634 
    13635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13636 
    13637 #if VMA_RECORDING_ENABLED
    13638  if(allocator->GetRecorder() != VMA_NULL)
    13639  {
    13640  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13641  }
    13642 #endif
    13643 
    13644  allocator->DestroyPool(pool);
    13645 }
    13646 
    13647 void vmaGetPoolStats(
    13648  VmaAllocator allocator,
    13649  VmaPool pool,
    13650  VmaPoolStats* pPoolStats)
    13651 {
    13652  VMA_ASSERT(allocator && pool && pPoolStats);
    13653 
    13654  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13655 
    13656  allocator->GetPoolStats(pool, pPoolStats);
    13657 }
    13658 
    13660  VmaAllocator allocator,
    13661  VmaPool pool,
    13662  size_t* pLostAllocationCount)
    13663 {
    13664  VMA_ASSERT(allocator && pool);
    13665 
    13666  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13667 
    13668 #if VMA_RECORDING_ENABLED
    13669  if(allocator->GetRecorder() != VMA_NULL)
    13670  {
    13671  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13672  }
    13673 #endif
    13674 
    13675  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13676 }
    13677 
    13678 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13679 {
    13680  VMA_ASSERT(allocator && pool);
    13681 
    13682  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13683 
    13684  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13685 
    13686  return allocator->CheckPoolCorruption(pool);
    13687 }
    13688 
    13689 VkResult vmaAllocateMemory(
    13690  VmaAllocator allocator,
    13691  const VkMemoryRequirements* pVkMemoryRequirements,
    13692  const VmaAllocationCreateInfo* pCreateInfo,
    13693  VmaAllocation* pAllocation,
    13694  VmaAllocationInfo* pAllocationInfo)
    13695 {
    13696  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13697 
    13698  VMA_DEBUG_LOG("vmaAllocateMemory");
    13699 
    13700  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13701 
    13702  VkResult result = allocator->AllocateMemory(
    13703  *pVkMemoryRequirements,
    13704  false, // requiresDedicatedAllocation
    13705  false, // prefersDedicatedAllocation
    13706  VK_NULL_HANDLE, // dedicatedBuffer
    13707  VK_NULL_HANDLE, // dedicatedImage
    13708  *pCreateInfo,
    13709  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13710  pAllocation);
    13711 
    13712 #if VMA_RECORDING_ENABLED
    13713  if(allocator->GetRecorder() != VMA_NULL)
    13714  {
    13715  allocator->GetRecorder()->RecordAllocateMemory(
    13716  allocator->GetCurrentFrameIndex(),
    13717  *pVkMemoryRequirements,
    13718  *pCreateInfo,
    13719  *pAllocation);
    13720  }
    13721 #endif
    13722 
    13723  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13724  {
    13725  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13726  }
    13727 
    13728  return result;
    13729 }
    13730 
    13732  VmaAllocator allocator,
    13733  VkBuffer buffer,
    13734  const VmaAllocationCreateInfo* pCreateInfo,
    13735  VmaAllocation* pAllocation,
    13736  VmaAllocationInfo* pAllocationInfo)
    13737 {
    13738  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13739 
    13740  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13741 
    13742  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13743 
    13744  VkMemoryRequirements vkMemReq = {};
    13745  bool requiresDedicatedAllocation = false;
    13746  bool prefersDedicatedAllocation = false;
    13747  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    13748  requiresDedicatedAllocation,
    13749  prefersDedicatedAllocation);
    13750 
    13751  VkResult result = allocator->AllocateMemory(
    13752  vkMemReq,
    13753  requiresDedicatedAllocation,
    13754  prefersDedicatedAllocation,
    13755  buffer, // dedicatedBuffer
    13756  VK_NULL_HANDLE, // dedicatedImage
    13757  *pCreateInfo,
    13758  VMA_SUBALLOCATION_TYPE_BUFFER,
    13759  pAllocation);
    13760 
    13761 #if VMA_RECORDING_ENABLED
    13762  if(allocator->GetRecorder() != VMA_NULL)
    13763  {
    13764  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    13765  allocator->GetCurrentFrameIndex(),
    13766  vkMemReq,
    13767  requiresDedicatedAllocation,
    13768  prefersDedicatedAllocation,
    13769  *pCreateInfo,
    13770  *pAllocation);
    13771  }
    13772 #endif
    13773 
    13774  if(pAllocationInfo && result == VK_SUCCESS)
    13775  {
    13776  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13777  }
    13778 
    13779  return result;
    13780 }
    13781 
    13782 VkResult vmaAllocateMemoryForImage(
    13783  VmaAllocator allocator,
    13784  VkImage image,
    13785  const VmaAllocationCreateInfo* pCreateInfo,
    13786  VmaAllocation* pAllocation,
    13787  VmaAllocationInfo* pAllocationInfo)
    13788 {
    13789  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13790 
    13791  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    13792 
    13793  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13794 
    13795  VkMemoryRequirements vkMemReq = {};
    13796  bool requiresDedicatedAllocation = false;
    13797  bool prefersDedicatedAllocation = false;
    13798  allocator->GetImageMemoryRequirements(image, vkMemReq,
    13799  requiresDedicatedAllocation, prefersDedicatedAllocation);
    13800 
    13801  VkResult result = allocator->AllocateMemory(
    13802  vkMemReq,
    13803  requiresDedicatedAllocation,
    13804  prefersDedicatedAllocation,
    13805  VK_NULL_HANDLE, // dedicatedBuffer
    13806  image, // dedicatedImage
    13807  *pCreateInfo,
    13808  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    13809  pAllocation);
    13810 
    13811 #if VMA_RECORDING_ENABLED
    13812  if(allocator->GetRecorder() != VMA_NULL)
    13813  {
    13814  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    13815  allocator->GetCurrentFrameIndex(),
    13816  vkMemReq,
    13817  requiresDedicatedAllocation,
    13818  prefersDedicatedAllocation,
    13819  *pCreateInfo,
    13820  *pAllocation);
    13821  }
    13822 #endif
    13823 
    13824  if(pAllocationInfo && result == VK_SUCCESS)
    13825  {
    13826  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13827  }
    13828 
    13829  return result;
    13830 }
    13831 
    13832 void vmaFreeMemory(
    13833  VmaAllocator allocator,
    13834  VmaAllocation allocation)
    13835 {
    13836  VMA_ASSERT(allocator);
    13837 
    13838  if(allocation == VK_NULL_HANDLE)
    13839  {
    13840  return;
    13841  }
    13842 
    13843  VMA_DEBUG_LOG("vmaFreeMemory");
    13844 
    13845  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13846 
    13847 #if VMA_RECORDING_ENABLED
    13848  if(allocator->GetRecorder() != VMA_NULL)
    13849  {
    13850  allocator->GetRecorder()->RecordFreeMemory(
    13851  allocator->GetCurrentFrameIndex(),
    13852  allocation);
    13853  }
    13854 #endif
    13855 
    13856  allocator->FreeMemory(allocation);
    13857 }
    13858 
    13860  VmaAllocator allocator,
    13861  VmaAllocation allocation,
    13862  VmaAllocationInfo* pAllocationInfo)
    13863 {
    13864  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    13865 
    13866  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13867 
    13868 #if VMA_RECORDING_ENABLED
    13869  if(allocator->GetRecorder() != VMA_NULL)
    13870  {
    13871  allocator->GetRecorder()->RecordGetAllocationInfo(
    13872  allocator->GetCurrentFrameIndex(),
    13873  allocation);
    13874  }
    13875 #endif
    13876 
    13877  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    13878 }
    13879 
    13880 VkBool32 vmaTouchAllocation(
    13881  VmaAllocator allocator,
    13882  VmaAllocation allocation)
    13883 {
    13884  VMA_ASSERT(allocator && allocation);
    13885 
    13886  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13887 
    13888 #if VMA_RECORDING_ENABLED
    13889  if(allocator->GetRecorder() != VMA_NULL)
    13890  {
    13891  allocator->GetRecorder()->RecordTouchAllocation(
    13892  allocator->GetCurrentFrameIndex(),
    13893  allocation);
    13894  }
    13895 #endif
    13896 
    13897  return allocator->TouchAllocation(allocation);
    13898 }
    13899 
    13901  VmaAllocator allocator,
    13902  VmaAllocation allocation,
    13903  void* pUserData)
    13904 {
    13905  VMA_ASSERT(allocator && allocation);
    13906 
    13907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13908 
    13909  allocation->SetUserData(allocator, pUserData);
    13910 
    13911 #if VMA_RECORDING_ENABLED
    13912  if(allocator->GetRecorder() != VMA_NULL)
    13913  {
    13914  allocator->GetRecorder()->RecordSetAllocationUserData(
    13915  allocator->GetCurrentFrameIndex(),
    13916  allocation,
    13917  pUserData);
    13918  }
    13919 #endif
    13920 }
    13921 
    13923  VmaAllocator allocator,
    13924  VmaAllocation* pAllocation)
    13925 {
    13926  VMA_ASSERT(allocator && pAllocation);
    13927 
    13928  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    13929 
    13930  allocator->CreateLostAllocation(pAllocation);
    13931 
    13932 #if VMA_RECORDING_ENABLED
    13933  if(allocator->GetRecorder() != VMA_NULL)
    13934  {
    13935  allocator->GetRecorder()->RecordCreateLostAllocation(
    13936  allocator->GetCurrentFrameIndex(),
    13937  *pAllocation);
    13938  }
    13939 #endif
    13940 }
    13941 
    13942 VkResult vmaMapMemory(
    13943  VmaAllocator allocator,
    13944  VmaAllocation allocation,
    13945  void** ppData)
    13946 {
    13947  VMA_ASSERT(allocator && allocation && ppData);
    13948 
    13949  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13950 
    13951  VkResult res = allocator->Map(allocation, ppData);
    13952 
    13953 #if VMA_RECORDING_ENABLED
    13954  if(allocator->GetRecorder() != VMA_NULL)
    13955  {
    13956  allocator->GetRecorder()->RecordMapMemory(
    13957  allocator->GetCurrentFrameIndex(),
    13958  allocation);
    13959  }
    13960 #endif
    13961 
    13962  return res;
    13963 }
    13964 
    13965 void vmaUnmapMemory(
    13966  VmaAllocator allocator,
    13967  VmaAllocation allocation)
    13968 {
    13969  VMA_ASSERT(allocator && allocation);
    13970 
    13971  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13972 
    13973 #if VMA_RECORDING_ENABLED
    13974  if(allocator->GetRecorder() != VMA_NULL)
    13975  {
    13976  allocator->GetRecorder()->RecordUnmapMemory(
    13977  allocator->GetCurrentFrameIndex(),
    13978  allocation);
    13979  }
    13980 #endif
    13981 
    13982  allocator->Unmap(allocation);
    13983 }
    13984 
    13985 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13986 {
    13987  VMA_ASSERT(allocator && allocation);
    13988 
    13989  VMA_DEBUG_LOG("vmaFlushAllocation");
    13990 
    13991  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13992 
    13993  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    13994 
    13995 #if VMA_RECORDING_ENABLED
    13996  if(allocator->GetRecorder() != VMA_NULL)
    13997  {
    13998  allocator->GetRecorder()->RecordFlushAllocation(
    13999  allocator->GetCurrentFrameIndex(),
    14000  allocation, offset, size);
    14001  }
    14002 #endif
    14003 }
    14004 
    14005 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14006 {
    14007  VMA_ASSERT(allocator && allocation);
    14008 
    14009  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14010 
    14011  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14012 
    14013  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14014 
    14015 #if VMA_RECORDING_ENABLED
    14016  if(allocator->GetRecorder() != VMA_NULL)
    14017  {
    14018  allocator->GetRecorder()->RecordInvalidateAllocation(
    14019  allocator->GetCurrentFrameIndex(),
    14020  allocation, offset, size);
    14021  }
    14022 #endif
    14023 }
    14024 
    14025 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14026 {
    14027  VMA_ASSERT(allocator);
    14028 
    14029  VMA_DEBUG_LOG("vmaCheckCorruption");
    14030 
    14031  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14032 
    14033  return allocator->CheckCorruption(memoryTypeBits);
    14034 }
    14035 
    14036 VkResult vmaDefragment(
    14037  VmaAllocator allocator,
    14038  VmaAllocation* pAllocations,
    14039  size_t allocationCount,
    14040  VkBool32* pAllocationsChanged,
    14041  const VmaDefragmentationInfo *pDefragmentationInfo,
    14042  VmaDefragmentationStats* pDefragmentationStats)
    14043 {
    14044  VMA_ASSERT(allocator && pAllocations);
    14045 
    14046  VMA_DEBUG_LOG("vmaDefragment");
    14047 
    14048  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14049 
    14050  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14051 }
    14052 
    14053 VkResult vmaBindBufferMemory(
    14054  VmaAllocator allocator,
    14055  VmaAllocation allocation,
    14056  VkBuffer buffer)
    14057 {
    14058  VMA_ASSERT(allocator && allocation && buffer);
    14059 
    14060  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14061 
    14062  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14063 
    14064  return allocator->BindBufferMemory(allocation, buffer);
    14065 }
    14066 
    14067 VkResult vmaBindImageMemory(
    14068  VmaAllocator allocator,
    14069  VmaAllocation allocation,
    14070  VkImage image)
    14071 {
    14072  VMA_ASSERT(allocator && allocation && image);
    14073 
    14074  VMA_DEBUG_LOG("vmaBindImageMemory");
    14075 
    14076  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14077 
    14078  return allocator->BindImageMemory(allocation, image);
    14079 }
    14080 
    14081 VkResult vmaCreateBuffer(
    14082  VmaAllocator allocator,
    14083  const VkBufferCreateInfo* pBufferCreateInfo,
    14084  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14085  VkBuffer* pBuffer,
    14086  VmaAllocation* pAllocation,
    14087  VmaAllocationInfo* pAllocationInfo)
    14088 {
    14089  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14090 
    14091  VMA_DEBUG_LOG("vmaCreateBuffer");
    14092 
    14093  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14094 
    14095  *pBuffer = VK_NULL_HANDLE;
    14096  *pAllocation = VK_NULL_HANDLE;
    14097 
    14098  // 1. Create VkBuffer.
    14099  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14100  allocator->m_hDevice,
    14101  pBufferCreateInfo,
    14102  allocator->GetAllocationCallbacks(),
    14103  pBuffer);
    14104  if(res >= 0)
    14105  {
    14106  // 2. vkGetBufferMemoryRequirements.
    14107  VkMemoryRequirements vkMemReq = {};
    14108  bool requiresDedicatedAllocation = false;
    14109  bool prefersDedicatedAllocation = false;
    14110  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14111  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14112 
    14113  // Make sure alignment requirements for specific buffer usages reported
    14114  // in Physical Device Properties are included in alignment reported by memory requirements.
    14115  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14116  {
    14117  VMA_ASSERT(vkMemReq.alignment %
    14118  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14119  }
    14120  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14121  {
    14122  VMA_ASSERT(vkMemReq.alignment %
    14123  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14124  }
    14125  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14126  {
    14127  VMA_ASSERT(vkMemReq.alignment %
    14128  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14129  }
    14130 
    14131  // 3. Allocate memory using allocator.
    14132  res = allocator->AllocateMemory(
    14133  vkMemReq,
    14134  requiresDedicatedAllocation,
    14135  prefersDedicatedAllocation,
    14136  *pBuffer, // dedicatedBuffer
    14137  VK_NULL_HANDLE, // dedicatedImage
    14138  *pAllocationCreateInfo,
    14139  VMA_SUBALLOCATION_TYPE_BUFFER,
    14140  pAllocation);
    14141 
    14142 #if VMA_RECORDING_ENABLED
    14143  if(allocator->GetRecorder() != VMA_NULL)
    14144  {
    14145  allocator->GetRecorder()->RecordCreateBuffer(
    14146  allocator->GetCurrentFrameIndex(),
    14147  *pBufferCreateInfo,
    14148  *pAllocationCreateInfo,
    14149  *pAllocation);
    14150  }
    14151 #endif
    14152 
    14153  if(res >= 0)
    14154  {
    14155  // 3. Bind buffer with memory.
    14156  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14157  if(res >= 0)
    14158  {
    14159  // All steps succeeded.
    14160  #if VMA_STATS_STRING_ENABLED
    14161  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14162  #endif
    14163  if(pAllocationInfo != VMA_NULL)
    14164  {
    14165  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14166  }
    14167 
    14168  return VK_SUCCESS;
    14169  }
    14170  allocator->FreeMemory(*pAllocation);
    14171  *pAllocation = VK_NULL_HANDLE;
    14172  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14173  *pBuffer = VK_NULL_HANDLE;
    14174  return res;
    14175  }
    14176  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14177  *pBuffer = VK_NULL_HANDLE;
    14178  return res;
    14179  }
    14180  return res;
    14181 }
    14182 
    14183 void vmaDestroyBuffer(
    14184  VmaAllocator allocator,
    14185  VkBuffer buffer,
    14186  VmaAllocation allocation)
    14187 {
    14188  VMA_ASSERT(allocator);
    14189 
    14190  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14191  {
    14192  return;
    14193  }
    14194 
    14195  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14196 
    14197  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14198 
    14199 #if VMA_RECORDING_ENABLED
    14200  if(allocator->GetRecorder() != VMA_NULL)
    14201  {
    14202  allocator->GetRecorder()->RecordDestroyBuffer(
    14203  allocator->GetCurrentFrameIndex(),
    14204  allocation);
    14205  }
    14206 #endif
    14207 
    14208  if(buffer != VK_NULL_HANDLE)
    14209  {
    14210  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14211  }
    14212 
    14213  if(allocation != VK_NULL_HANDLE)
    14214  {
    14215  allocator->FreeMemory(allocation);
    14216  }
    14217 }
    14218 
    14219 VkResult vmaCreateImage(
    14220  VmaAllocator allocator,
    14221  const VkImageCreateInfo* pImageCreateInfo,
    14222  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14223  VkImage* pImage,
    14224  VmaAllocation* pAllocation,
    14225  VmaAllocationInfo* pAllocationInfo)
    14226 {
    14227  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14228 
    14229  VMA_DEBUG_LOG("vmaCreateImage");
    14230 
    14231  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14232 
    14233  *pImage = VK_NULL_HANDLE;
    14234  *pAllocation = VK_NULL_HANDLE;
    14235 
    14236  // 1. Create VkImage.
    14237  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14238  allocator->m_hDevice,
    14239  pImageCreateInfo,
    14240  allocator->GetAllocationCallbacks(),
    14241  pImage);
    14242  if(res >= 0)
    14243  {
    14244  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14245  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14246  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14247 
    14248  // 2. Allocate memory using allocator.
    14249  VkMemoryRequirements vkMemReq = {};
    14250  bool requiresDedicatedAllocation = false;
    14251  bool prefersDedicatedAllocation = false;
    14252  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14253  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14254 
    14255  res = allocator->AllocateMemory(
    14256  vkMemReq,
    14257  requiresDedicatedAllocation,
    14258  prefersDedicatedAllocation,
    14259  VK_NULL_HANDLE, // dedicatedBuffer
    14260  *pImage, // dedicatedImage
    14261  *pAllocationCreateInfo,
    14262  suballocType,
    14263  pAllocation);
    14264 
    14265 #if VMA_RECORDING_ENABLED
    14266  if(allocator->GetRecorder() != VMA_NULL)
    14267  {
    14268  allocator->GetRecorder()->RecordCreateImage(
    14269  allocator->GetCurrentFrameIndex(),
    14270  *pImageCreateInfo,
    14271  *pAllocationCreateInfo,
    14272  *pAllocation);
    14273  }
    14274 #endif
    14275 
    14276  if(res >= 0)
    14277  {
    14278  // 3. Bind image with memory.
    14279  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14280  if(res >= 0)
    14281  {
    14282  // All steps succeeded.
    14283  #if VMA_STATS_STRING_ENABLED
    14284  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14285  #endif
    14286  if(pAllocationInfo != VMA_NULL)
    14287  {
    14288  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14289  }
    14290 
    14291  return VK_SUCCESS;
    14292  }
    14293  allocator->FreeMemory(*pAllocation);
    14294  *pAllocation = VK_NULL_HANDLE;
    14295  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14296  *pImage = VK_NULL_HANDLE;
    14297  return res;
    14298  }
    14299  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14300  *pImage = VK_NULL_HANDLE;
    14301  return res;
    14302  }
    14303  return res;
    14304 }
    14305 
    14306 void vmaDestroyImage(
    14307  VmaAllocator allocator,
    14308  VkImage image,
    14309  VmaAllocation allocation)
    14310 {
    14311  VMA_ASSERT(allocator);
    14312 
    14313  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14314  {
    14315  return;
    14316  }
    14317 
    14318  VMA_DEBUG_LOG("vmaDestroyImage");
    14319 
    14320  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14321 
    14322 #if VMA_RECORDING_ENABLED
    14323  if(allocator->GetRecorder() != VMA_NULL)
    14324  {
    14325  allocator->GetRecorder()->RecordDestroyImage(
    14326  allocator->GetCurrentFrameIndex(),
    14327  allocation);
    14328  }
    14329 #endif
    14330 
    14331  if(image != VK_NULL_HANDLE)
    14332  {
    14333  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14334  }
    14335  if(allocation != VK_NULL_HANDLE)
    14336  {
    14337  allocator->FreeMemory(allocation);
    14338  }
    14339 }
    14340 
    14341 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1567
    -
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1868
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1464 /*
    1465 Define this macro to 0/1 to disable/enable support for recording functionality,
    1466 available through VmaAllocatorCreateInfo::pRecordSettings.
    1467 */
    1468 #ifndef VMA_RECORDING_ENABLED
    1469  #ifdef _WIN32
    1470  #define VMA_RECORDING_ENABLED 1
    1471  #else
    1472  #define VMA_RECORDING_ENABLED 0
    1473  #endif
    1474 #endif
    1475 
    1476 #ifndef NOMINMAX
    1477  #define NOMINMAX // For windows.h
    1478 #endif
    1479 
    1480 #include <vulkan/vulkan.h>
    1481 
    1482 #if VMA_RECORDING_ENABLED
    1483  #include <windows.h>
    1484 #endif
    1485 
    1486 #if !defined(VMA_DEDICATED_ALLOCATION)
    1487  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1488  #define VMA_DEDICATED_ALLOCATION 1
    1489  #else
    1490  #define VMA_DEDICATED_ALLOCATION 0
    1491  #endif
    1492 #endif
    1493 
    1503 VK_DEFINE_HANDLE(VmaAllocator)
    1504 
    1505 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1507  VmaAllocator allocator,
    1508  uint32_t memoryType,
    1509  VkDeviceMemory memory,
    1510  VkDeviceSize size);
    1512 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1513  VmaAllocator allocator,
    1514  uint32_t memoryType,
    1515  VkDeviceMemory memory,
    1516  VkDeviceSize size);
    1517 
    1531 
    1561 
    1564 typedef VkFlags VmaAllocatorCreateFlags;
    1565 
    1570 typedef struct VmaVulkanFunctions {
    1571  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1572  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1573  PFN_vkAllocateMemory vkAllocateMemory;
    1574  PFN_vkFreeMemory vkFreeMemory;
    1575  PFN_vkMapMemory vkMapMemory;
    1576  PFN_vkUnmapMemory vkUnmapMemory;
    1577  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1578  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1579  PFN_vkBindBufferMemory vkBindBufferMemory;
    1580  PFN_vkBindImageMemory vkBindImageMemory;
    1581  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1582  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1583  PFN_vkCreateBuffer vkCreateBuffer;
    1584  PFN_vkDestroyBuffer vkDestroyBuffer;
    1585  PFN_vkCreateImage vkCreateImage;
    1586  PFN_vkDestroyImage vkDestroyImage;
    1587 #if VMA_DEDICATED_ALLOCATION
    1588  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1589  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1590 #endif
    1592 
    1594 typedef enum VmaRecordFlagBits {
    1601 
    1604 typedef VkFlags VmaRecordFlags;
    1605 
    1607 typedef struct VmaRecordSettings
    1608 {
    1618  const char* pFilePath;
    1620 
    1623 {
    1627 
    1628  VkPhysicalDevice physicalDevice;
    1630 
    1631  VkDevice device;
    1633 
    1636 
    1637  const VkAllocationCallbacks* pAllocationCallbacks;
    1639 
    1678  const VkDeviceSize* pHeapSizeLimit;
    1699 
    1701 VkResult vmaCreateAllocator(
    1702  const VmaAllocatorCreateInfo* pCreateInfo,
    1703  VmaAllocator* pAllocator);
    1704 
    1706 void vmaDestroyAllocator(
    1707  VmaAllocator allocator);
    1708 
    1714  VmaAllocator allocator,
    1715  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1716 
    1722  VmaAllocator allocator,
    1723  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1724 
    1732  VmaAllocator allocator,
    1733  uint32_t memoryTypeIndex,
    1734  VkMemoryPropertyFlags* pFlags);
    1735 
    1745  VmaAllocator allocator,
    1746  uint32_t frameIndex);
    1747 
    1750 typedef struct VmaStatInfo
    1751 {
    1753  uint32_t blockCount;
    1759  VkDeviceSize usedBytes;
    1761  VkDeviceSize unusedBytes;
    1764 } VmaStatInfo;
    1765 
    1767 typedef struct VmaStats
    1768 {
    1769  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1770  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1772 } VmaStats;
    1773 
    1775 void vmaCalculateStats(
    1776  VmaAllocator allocator,
    1777  VmaStats* pStats);
    1778 
    1779 #define VMA_STATS_STRING_ENABLED 1
    1780 
    1781 #if VMA_STATS_STRING_ENABLED
    1782 
    1784 
    1786 void vmaBuildStatsString(
    1787  VmaAllocator allocator,
    1788  char** ppStatsString,
    1789  VkBool32 detailedMap);
    1790 
    1791 void vmaFreeStatsString(
    1792  VmaAllocator allocator,
    1793  char* pStatsString);
    1794 
    1795 #endif // #if VMA_STATS_STRING_ENABLED
    1796 
    1805 VK_DEFINE_HANDLE(VmaPool)
    1806 
    1807 typedef enum VmaMemoryUsage
    1808 {
    1857 } VmaMemoryUsage;
    1858 
    1873 
    1928 
    1941 
    1951 
    1958 
    1962 
    1964 {
    1977  VkMemoryPropertyFlags requiredFlags;
    1982  VkMemoryPropertyFlags preferredFlags;
    1990  uint32_t memoryTypeBits;
    2003  void* pUserData;
    2005 
    2022 VkResult vmaFindMemoryTypeIndex(
    2023  VmaAllocator allocator,
    2024  uint32_t memoryTypeBits,
    2025  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2026  uint32_t* pMemoryTypeIndex);
    2027 
    2041  VmaAllocator allocator,
    2042  const VkBufferCreateInfo* pBufferCreateInfo,
    2043  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2044  uint32_t* pMemoryTypeIndex);
    2045 
    2059  VmaAllocator allocator,
    2060  const VkImageCreateInfo* pImageCreateInfo,
    2061  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2062  uint32_t* pMemoryTypeIndex);
    2063 
    2084 
    2101 
    2112 
    2118 
    2121 typedef VkFlags VmaPoolCreateFlags;
    2122 
    2125 typedef struct VmaPoolCreateInfo {
    2140  VkDeviceSize blockSize;
    2169 
    2172 typedef struct VmaPoolStats {
    2175  VkDeviceSize size;
    2178  VkDeviceSize unusedSize;
    2191  VkDeviceSize unusedRangeSizeMax;
    2194  size_t blockCount;
    2195 } VmaPoolStats;
    2196 
    2203 VkResult vmaCreatePool(
    2204  VmaAllocator allocator,
    2205  const VmaPoolCreateInfo* pCreateInfo,
    2206  VmaPool* pPool);
    2207 
    2210 void vmaDestroyPool(
    2211  VmaAllocator allocator,
    2212  VmaPool pool);
    2213 
    2220 void vmaGetPoolStats(
    2221  VmaAllocator allocator,
    2222  VmaPool pool,
    2223  VmaPoolStats* pPoolStats);
    2224 
    2232  VmaAllocator allocator,
    2233  VmaPool pool,
    2234  size_t* pLostAllocationCount);
    2235 
    2250 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2251 
    2276 VK_DEFINE_HANDLE(VmaAllocation)
    2277 
    2278 
    2280 typedef struct VmaAllocationInfo {
    2285  uint32_t memoryType;
    2294  VkDeviceMemory deviceMemory;
    2299  VkDeviceSize offset;
    2304  VkDeviceSize size;
    2318  void* pUserData;
    2320 
    2331 VkResult vmaAllocateMemory(
    2332  VmaAllocator allocator,
    2333  const VkMemoryRequirements* pVkMemoryRequirements,
    2334  const VmaAllocationCreateInfo* pCreateInfo,
    2335  VmaAllocation* pAllocation,
    2336  VmaAllocationInfo* pAllocationInfo);
    2337 
    2345  VmaAllocator allocator,
    2346  VkBuffer buffer,
    2347  const VmaAllocationCreateInfo* pCreateInfo,
    2348  VmaAllocation* pAllocation,
    2349  VmaAllocationInfo* pAllocationInfo);
    2350 
    2352 VkResult vmaAllocateMemoryForImage(
    2353  VmaAllocator allocator,
    2354  VkImage image,
    2355  const VmaAllocationCreateInfo* pCreateInfo,
    2356  VmaAllocation* pAllocation,
    2357  VmaAllocationInfo* pAllocationInfo);
    2358 
    2360 void vmaFreeMemory(
    2361  VmaAllocator allocator,
    2362  VmaAllocation allocation);
    2363 
    2381  VmaAllocator allocator,
    2382  VmaAllocation allocation,
    2383  VmaAllocationInfo* pAllocationInfo);
    2384 
    2399 VkBool32 vmaTouchAllocation(
    2400  VmaAllocator allocator,
    2401  VmaAllocation allocation);
    2402 
    2417  VmaAllocator allocator,
    2418  VmaAllocation allocation,
    2419  void* pUserData);
    2420 
    2432  VmaAllocator allocator,
    2433  VmaAllocation* pAllocation);
    2434 
    2469 VkResult vmaMapMemory(
    2470  VmaAllocator allocator,
    2471  VmaAllocation allocation,
    2472  void** ppData);
    2473 
    2478 void vmaUnmapMemory(
    2479  VmaAllocator allocator,
    2480  VmaAllocation allocation);
    2481 
    2494 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2495 
    2508 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2509 
    2526 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2527 
    2529 typedef struct VmaDefragmentationInfo {
    2534  VkDeviceSize maxBytesToMove;
    2541 
    2543 typedef struct VmaDefragmentationStats {
    2545  VkDeviceSize bytesMoved;
    2547  VkDeviceSize bytesFreed;
    2553 
    2592 VkResult vmaDefragment(
    2593  VmaAllocator allocator,
    2594  VmaAllocation* pAllocations,
    2595  size_t allocationCount,
    2596  VkBool32* pAllocationsChanged,
    2597  const VmaDefragmentationInfo *pDefragmentationInfo,
    2598  VmaDefragmentationStats* pDefragmentationStats);
    2599 
    2612 VkResult vmaBindBufferMemory(
    2613  VmaAllocator allocator,
    2614  VmaAllocation allocation,
    2615  VkBuffer buffer);
    2616 
    2629 VkResult vmaBindImageMemory(
    2630  VmaAllocator allocator,
    2631  VmaAllocation allocation,
    2632  VkImage image);
    2633 
    2660 VkResult vmaCreateBuffer(
    2661  VmaAllocator allocator,
    2662  const VkBufferCreateInfo* pBufferCreateInfo,
    2663  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2664  VkBuffer* pBuffer,
    2665  VmaAllocation* pAllocation,
    2666  VmaAllocationInfo* pAllocationInfo);
    2667 
    2679 void vmaDestroyBuffer(
    2680  VmaAllocator allocator,
    2681  VkBuffer buffer,
    2682  VmaAllocation allocation);
    2683 
    2685 VkResult vmaCreateImage(
    2686  VmaAllocator allocator,
    2687  const VkImageCreateInfo* pImageCreateInfo,
    2688  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2689  VkImage* pImage,
    2690  VmaAllocation* pAllocation,
    2691  VmaAllocationInfo* pAllocationInfo);
    2692 
    2704 void vmaDestroyImage(
    2705  VmaAllocator allocator,
    2706  VkImage image,
    2707  VmaAllocation allocation);
    2708 
    2709 #ifdef __cplusplus
    2710 }
    2711 #endif
    2712 
    2713 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2714 
    2715 // For Visual Studio IntelliSense.
    2716 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2717 #define VMA_IMPLEMENTATION
    2718 #endif
    2719 
    2720 #ifdef VMA_IMPLEMENTATION
    2721 #undef VMA_IMPLEMENTATION
    2722 
    2723 #include <cstdint>
    2724 #include <cstdlib>
    2725 #include <cstring>
    2726 
    2727 /*******************************************************************************
    2728 CONFIGURATION SECTION
    2729 
    2730 Define some of these macros before each #include of this header or change them
    2731 here if you need other then default behavior depending on your environment.
    2732 */
    2733 
    2734 /*
    2735 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2736 internally, like:
    2737 
    2738  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2739 
    2740 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2741 VmaAllocatorCreateInfo::pVulkanFunctions.
    2742 */
    2743 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2744 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2745 #endif
    2746 
    2747 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2748 //#define VMA_USE_STL_CONTAINERS 1
    2749 
    2750 /* Set this macro to 1 to make the library including and using STL containers:
    2751 std::pair, std::vector, std::list, std::unordered_map.
    2752 
    2753 Set it to 0 or undefined to make the library using its own implementation of
    2754 the containers.
    2755 */
    2756 #if VMA_USE_STL_CONTAINERS
    2757  #define VMA_USE_STL_VECTOR 1
    2758  #define VMA_USE_STL_UNORDERED_MAP 1
    2759  #define VMA_USE_STL_LIST 1
    2760 #endif
    2761 
    2762 #if VMA_USE_STL_VECTOR
    2763  #include <vector>
    2764 #endif
    2765 
    2766 #if VMA_USE_STL_UNORDERED_MAP
    2767  #include <unordered_map>
    2768 #endif
    2769 
    2770 #if VMA_USE_STL_LIST
    2771  #include <list>
    2772 #endif
    2773 
    2774 /*
    2775 Following headers are used in this CONFIGURATION section only, so feel free to
    2776 remove them if not needed.
    2777 */
    2778 #include <cassert> // for assert
    2779 #include <algorithm> // for min, max
    2780 #include <mutex> // for std::mutex
    2781 #include <atomic> // for std::atomic
    2782 
    2783 #ifndef VMA_NULL
    2784  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2785  #define VMA_NULL nullptr
    2786 #endif
    2787 
    2788 #if defined(__APPLE__) || defined(__ANDROID__)
    2789 #include <cstdlib>
    2790 void *aligned_alloc(size_t alignment, size_t size)
    2791 {
    2792  // alignment must be >= sizeof(void*)
    2793  if(alignment < sizeof(void*))
    2794  {
    2795  alignment = sizeof(void*);
    2796  }
    2797 
    2798  void *pointer;
    2799  if(posix_memalign(&pointer, alignment, size) == 0)
    2800  return pointer;
    2801  return VMA_NULL;
    2802 }
    2803 #endif
    2804 
    2805 // If your compiler is not compatible with C++11 and definition of
    2806 // aligned_alloc() function is missing, uncommeting following line may help:
    2807 
    2808 //#include <malloc.h>
    2809 
    2810 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2811 #ifndef VMA_ASSERT
    2812  #ifdef _DEBUG
    2813  #define VMA_ASSERT(expr) assert(expr)
    2814  #else
    2815  #define VMA_ASSERT(expr)
    2816  #endif
    2817 #endif
    2818 
    2819 // Assert that will be called very often, like inside data structures e.g. operator[].
    2820 // Making it non-empty can make program slow.
    2821 #ifndef VMA_HEAVY_ASSERT
    2822  #ifdef _DEBUG
    2823  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2824  #else
    2825  #define VMA_HEAVY_ASSERT(expr)
    2826  #endif
    2827 #endif
    2828 
    2829 #ifndef VMA_ALIGN_OF
    2830  #define VMA_ALIGN_OF(type) (__alignof(type))
    2831 #endif
    2832 
    2833 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2834  #if defined(_WIN32)
    2835  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2836  #else
    2837  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2838  #endif
    2839 #endif
    2840 
    2841 #ifndef VMA_SYSTEM_FREE
    2842  #if defined(_WIN32)
    2843  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2844  #else
    2845  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2846  #endif
    2847 #endif
    2848 
    2849 #ifndef VMA_MIN
    2850  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2851 #endif
    2852 
    2853 #ifndef VMA_MAX
    2854  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2855 #endif
    2856 
    2857 #ifndef VMA_SWAP
    2858  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2859 #endif
    2860 
    2861 #ifndef VMA_SORT
    2862  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2863 #endif
    2864 
    2865 #ifndef VMA_DEBUG_LOG
    2866  #define VMA_DEBUG_LOG(format, ...)
    2867  /*
    2868  #define VMA_DEBUG_LOG(format, ...) do { \
    2869  printf(format, __VA_ARGS__); \
    2870  printf("\n"); \
    2871  } while(false)
    2872  */
    2873 #endif
    2874 
    2875 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2876 #if VMA_STATS_STRING_ENABLED
    2877  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2878  {
    2879  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2880  }
    2881  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2882  {
    2883  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2884  }
    2885  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2886  {
    2887  snprintf(outStr, strLen, "%p", ptr);
    2888  }
    2889 #endif
    2890 
    2891 #ifndef VMA_MUTEX
    2892  class VmaMutex
    2893  {
    2894  public:
    2895  VmaMutex() { }
    2896  ~VmaMutex() { }
    2897  void Lock() { m_Mutex.lock(); }
    2898  void Unlock() { m_Mutex.unlock(); }
    2899  private:
    2900  std::mutex m_Mutex;
    2901  };
    2902  #define VMA_MUTEX VmaMutex
    2903 #endif
    2904 
    2905 /*
    2906 If providing your own implementation, you need to implement a subset of std::atomic:
    2907 
    2908 - Constructor(uint32_t desired)
    2909 - uint32_t load() const
    2910 - void store(uint32_t desired)
    2911 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2912 */
    2913 #ifndef VMA_ATOMIC_UINT32
    2914  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2915 #endif
    2916 
    2917 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2918 
    2922  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2923 #endif
    2924 
    2925 #ifndef VMA_DEBUG_ALIGNMENT
    2926 
    2930  #define VMA_DEBUG_ALIGNMENT (1)
    2931 #endif
    2932 
    2933 #ifndef VMA_DEBUG_MARGIN
    2934 
    2938  #define VMA_DEBUG_MARGIN (0)
    2939 #endif
    2940 
    2941 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2942 
    2946  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2947 #endif
    2948 
    2949 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2950 
    2955  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2956 #endif
    2957 
    2958 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2959 
    2963  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2964 #endif
    2965 
    2966 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2967 
    2971  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2972 #endif
    2973 
    2974 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2975  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2977 #endif
    2978 
    2979 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2980  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2982 #endif
    2983 
    2984 #ifndef VMA_CLASS_NO_COPY
    2985  #define VMA_CLASS_NO_COPY(className) \
    2986  private: \
    2987  className(const className&) = delete; \
    2988  className& operator=(const className&) = delete;
    2989 #endif
    2990 
    2991 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    2992 
    2993 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    2994 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    2995 
    2996 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    2997 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    2998 
    2999 /*******************************************************************************
    3000 END OF CONFIGURATION
    3001 */
    3002 
    3003 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3004  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3005 
    3006 // Returns number of bits set to 1 in (v).
    3007 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3008 {
    3009  uint32_t c = v - ((v >> 1) & 0x55555555);
    3010  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3011  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3012  c = ((c >> 8) + c) & 0x00FF00FF;
    3013  c = ((c >> 16) + c) & 0x0000FFFF;
    3014  return c;
    3015 }
    3016 
    3017 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3018 // Use types like uint32_t, uint64_t as T.
    3019 template <typename T>
    3020 static inline T VmaAlignUp(T val, T align)
    3021 {
    3022  return (val + align - 1) / align * align;
    3023 }
    3024 // Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
    3025 // Use types like uint32_t, uint64_t as T.
    3026 template <typename T>
    3027 static inline T VmaAlignDown(T val, T align)
    3028 {
    3029  return val / align * align;
    3030 }
    3031 
    3032 // Division with mathematical rounding to nearest number.
    3033 template <typename T>
    3034 static inline T VmaRoundDiv(T x, T y)
    3035 {
    3036  return (x + (y / (T)2)) / y;
    3037 }
    3038 
    3039 /*
    3040 Returns true if given number is a power of two.
    3041 T must be unsigned integer number or signed integer but always nonnegative.
    3042 For 0 returns true.
    3043 */
    3044 template <typename T>
    3045 inline bool VmaIsPow2(T x)
    3046 {
    3047  return (x & (x-1)) == 0;
    3048 }
    3049 
    3050 // Returns smallest power of 2 greater or equal to v.
    3051 static inline uint32_t VmaNextPow2(uint32_t v)
    3052 {
    3053  v--;
    3054  v |= v >> 1;
    3055  v |= v >> 2;
    3056  v |= v >> 4;
    3057  v |= v >> 8;
    3058  v |= v >> 16;
    3059  v++;
    3060  return v;
    3061 }
    3062 static inline uint64_t VmaNextPow2(uint64_t v)
    3063 {
    3064  v--;
    3065  v |= v >> 1;
    3066  v |= v >> 2;
    3067  v |= v >> 4;
    3068  v |= v >> 8;
    3069  v |= v >> 16;
    3070  v |= v >> 32;
    3071  v++;
    3072  return v;
    3073 }
    3074 
    3075 // Returns largest power of 2 less or equal to v.
    3076 static inline uint32_t VmaPrevPow2(uint32_t v)
    3077 {
    3078  v |= v >> 1;
    3079  v |= v >> 2;
    3080  v |= v >> 4;
    3081  v |= v >> 8;
    3082  v |= v >> 16;
    3083  v = v ^ (v >> 1);
    3084  return v;
    3085 }
    3086 static inline uint64_t VmaPrevPow2(uint64_t v)
    3087 {
    3088  v |= v >> 1;
    3089  v |= v >> 2;
    3090  v |= v >> 4;
    3091  v |= v >> 8;
    3092  v |= v >> 16;
    3093  v |= v >> 32;
    3094  v = v ^ (v >> 1);
    3095  return v;
    3096 }
    3097 
    3098 static inline bool VmaStrIsEmpty(const char* pStr)
    3099 {
    3100  return pStr == VMA_NULL || *pStr == '\0';
    3101 }
    3102 
    3103 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3104 {
    3105  switch(algorithm)
    3106  {
    // NOTE(review): the `case` labels for the next two `return` statements are
    // missing from this generated extract (presumably
    // `case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:` and
    // `case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:`) — confirm against the full
    // vk_mem_alloc.h before relying on this listing.
    3108  return "Linear";
    3110  return "Buddy";
    3111  case 0:
    3112  return "Default";
    3113  default:
    // Unknown algorithm value is a programmer error.
    3114  VMA_ASSERT(0);
    3115  return "";
    3116  }
    3117 }
    3118 
    3119 #ifndef VMA_SORT
    3120 
    3121 template<typename Iterator, typename Compare>
    3122 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3123 {
    3124  Iterator centerValue = end; --centerValue;
    3125  Iterator insertIndex = beg;
    3126  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3127  {
    3128  if(cmp(*memTypeIndex, *centerValue))
    3129  {
    3130  if(insertIndex != memTypeIndex)
    3131  {
    3132  VMA_SWAP(*memTypeIndex, *insertIndex);
    3133  }
    3134  ++insertIndex;
    3135  }
    3136  }
    3137  if(insertIndex != centerValue)
    3138  {
    3139  VMA_SWAP(*insertIndex, *centerValue);
    3140  }
    3141  return insertIndex;
    3142 }
    3143 
    3144 template<typename Iterator, typename Compare>
    3145 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3146 {
    3147  if(beg < end)
    3148  {
    3149  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3150  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3151  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3152  }
    3153 }
    3154 
    3155 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3156 
    3157 #endif // #ifndef VMA_SORT
    3158 
    3159 /*
    3160 Returns true if two memory blocks occupy overlapping pages.
    3161 ResourceA must be in less memory offset than ResourceB.
    3162 
    3163 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3164 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3165 */
    3166 static inline bool VmaBlocksOnSamePage(
    3167  VkDeviceSize resourceAOffset,
    3168  VkDeviceSize resourceASize,
    3169  VkDeviceSize resourceBOffset,
    3170  VkDeviceSize pageSize)
    3171 {
    3172  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3173  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3174  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3175  VkDeviceSize resourceBStart = resourceBOffset;
    3176  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3177  return resourceAEndPage == resourceBStartPage;
    3178 }
    3179 
    3180 enum VmaSuballocationType
    3181 {
    3182  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3183  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3184  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3185  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3186  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3187  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3188  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3189 };
    3190 
    3191 /*
    3192 Returns true if given suballocation types could conflict and must respect
    3193 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3194 or linear image and another one is optimal image. If type is unknown, behave
    3195 conservatively.
    3196 */
    3197 static inline bool VmaIsBufferImageGranularityConflict(
    3198  VmaSuballocationType suballocType1,
    3199  VmaSuballocationType suballocType2)
    3200 {
    3201  if(suballocType1 > suballocType2)
    3202  {
    3203  VMA_SWAP(suballocType1, suballocType2);
    3204  }
    3205 
    3206  switch(suballocType1)
    3207  {
    3208  case VMA_SUBALLOCATION_TYPE_FREE:
    3209  return false;
    3210  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3211  return true;
    3212  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3213  return
    3214  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3215  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3216  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3217  return
    3218  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3219  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3220  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3221  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3222  return
    3223  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3224  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3225  return false;
    3226  default:
    3227  VMA_ASSERT(0);
    3228  return true;
    3229  }
    3230 }
    3231 
    3232 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3233 {
    3234  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3235  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3236  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3237  {
    3238  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3239  }
    3240 }
    3241 
    3242 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3243 {
    3244  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3245  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3246  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3247  {
    3248  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3249  {
    3250  return false;
    3251  }
    3252  }
    3253  return true;
    3254 }
    3255 
    3256 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3257 struct VmaMutexLock
    3258 {
    3259  VMA_CLASS_NO_COPY(VmaMutexLock)
    3260 public:
    3261  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3262  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3263  {
    3264  if(m_pMutex)
    3265  {
    3266  m_pMutex->Lock();
    3267  }
    3268  }
    3269 
    3270  ~VmaMutexLock()
    3271  {
    3272  if(m_pMutex)
    3273  {
    3274  m_pMutex->Unlock();
    3275  }
    3276  }
    3277 
    3278 private:
    3279  VMA_MUTEX* m_pMutex;
    3280 };
    3281 
    3282 #if VMA_DEBUG_GLOBAL_MUTEX
    3283  static VMA_MUTEX gDebugGlobalMutex;
    3284  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3285 #else
    3286  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3287 #endif
    3288 
    3289 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3290 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3291 
    3292 /*
    3293 Performs binary search and returns iterator to first element that is greater or
    3294 equal to (key), according to comparison (cmp).
    3295 
    3296 Cmp should return true if first argument is less than second argument.
    3297 
    3298 Returned value is the found element, if present in the collection or place where
    3299 new element with value (key) should be inserted.
    3300 */
    3301 template <typename CmpLess, typename IterT, typename KeyT>
    3302 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3303 {
    3304  size_t down = 0, up = (end - beg);
    3305  while(down < up)
    3306  {
    3307  const size_t mid = (down + up) / 2;
    3308  if(cmp(*(beg+mid), key))
    3309  {
    3310  down = mid + 1;
    3311  }
    3312  else
    3313  {
    3314  up = mid;
    3315  }
    3316  }
    3317  return beg + down;
    3318 }
    3319 
    3321 // Memory allocation
    3322 
    3323 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3324 {
    3325  if((pAllocationCallbacks != VMA_NULL) &&
    3326  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3327  {
    3328  return (*pAllocationCallbacks->pfnAllocation)(
    3329  pAllocationCallbacks->pUserData,
    3330  size,
    3331  alignment,
    3332  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3333  }
    3334  else
    3335  {
    3336  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3337  }
    3338 }
    3339 
    3340 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3341 {
    3342  if((pAllocationCallbacks != VMA_NULL) &&
    3343  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3344  {
    3345  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3346  }
    3347  else
    3348  {
    3349  VMA_SYSTEM_FREE(ptr);
    3350  }
    3351 }
    3352 
// Allocates raw, suitably aligned storage for one object of type T.
// No constructor is called; pair with vma_new for construction.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3358 
// Allocates raw, suitably aligned storage for count objects of type T.
// No constructors are called; pair with vma_new_array / vma_delete_array.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3364 
// Placement-new helpers: allocate raw storage via the allocation callbacks,
// then construct in place. NOTE(review): vma_new_array placement-constructs
// only the element at the array start; types used with it appear to be
// treated as POD by callers — confirm before using with non-trivial types.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3368 
// Destroys a single object created with vma_new and frees its storage.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3375 
    3376 template<typename T>
    3377 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3378 {
    3379  if(ptr != VMA_NULL)
    3380  {
    3381  for(size_t i = count; i--; )
    3382  {
    3383  ptr[i].~T();
    3384  }
    3385  VmaFree(pAllocationCallbacks, ptr);
    3386  }
    3387 }
    3388 
// STL-compatible allocator that routes all allocations through the
// VkAllocationCallbacks stored in m_pCallbacks (which may be null,
// falling back to the system allocator inside VmaMalloc/VmaFree).
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they use the same callbacks, i.e.
    // memory allocated by one can be deallocated by the other.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3416 
    3417 #if VMA_USE_STL_VECTOR
    3418 
    3419 #define VmaVector std::vector
    3420 
    3421 template<typename T, typename allocatorT>
    3422 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3423 {
    3424  vec.insert(vec.begin() + index, item);
    3425 }
    3426 
    3427 template<typename T, typename allocatorT>
    3428 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3429 {
    3430  vec.erase(vec.begin() + index);
    3431 }
    3432 
    3433 #else // #if VMA_USE_STL_VECTOR
    3434 
    3435 /* Class with interface compatible with subset of std::vector.
    3436 T must be POD because constructors and destructors are not called and memcpy is
    3437 used for these objects. */
    3438 template<typename T, typename AllocatorT>
    3439 class VmaVector
    3440 {
    3441 public:
    3442  typedef T value_type;
    3443 
    3444  VmaVector(const AllocatorT& allocator) :
    3445  m_Allocator(allocator),
    3446  m_pArray(VMA_NULL),
    3447  m_Count(0),
    3448  m_Capacity(0)
    3449  {
    3450  }
    3451 
    3452  VmaVector(size_t count, const AllocatorT& allocator) :
    3453  m_Allocator(allocator),
    3454  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3455  m_Count(count),
    3456  m_Capacity(count)
    3457  {
    3458  }
    3459 
    3460  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3461  m_Allocator(src.m_Allocator),
    3462  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3463  m_Count(src.m_Count),
    3464  m_Capacity(src.m_Count)
    3465  {
    3466  if(m_Count != 0)
    3467  {
    3468  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3469  }
    3470  }
    3471 
    3472  ~VmaVector()
    3473  {
    3474  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3475  }
    3476 
    3477  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3478  {
    3479  if(&rhs != this)
    3480  {
    3481  resize(rhs.m_Count);
    3482  if(m_Count != 0)
    3483  {
    3484  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3485  }
    3486  }
    3487  return *this;
    3488  }
    3489 
    3490  bool empty() const { return m_Count == 0; }
    3491  size_t size() const { return m_Count; }
    3492  T* data() { return m_pArray; }
    3493  const T* data() const { return m_pArray; }
    3494 
    3495  T& operator[](size_t index)
    3496  {
    3497  VMA_HEAVY_ASSERT(index < m_Count);
    3498  return m_pArray[index];
    3499  }
    3500  const T& operator[](size_t index) const
    3501  {
    3502  VMA_HEAVY_ASSERT(index < m_Count);
    3503  return m_pArray[index];
    3504  }
    3505 
    3506  T& front()
    3507  {
    3508  VMA_HEAVY_ASSERT(m_Count > 0);
    3509  return m_pArray[0];
    3510  }
    3511  const T& front() const
    3512  {
    3513  VMA_HEAVY_ASSERT(m_Count > 0);
    3514  return m_pArray[0];
    3515  }
    3516  T& back()
    3517  {
    3518  VMA_HEAVY_ASSERT(m_Count > 0);
    3519  return m_pArray[m_Count - 1];
    3520  }
    3521  const T& back() const
    3522  {
    3523  VMA_HEAVY_ASSERT(m_Count > 0);
    3524  return m_pArray[m_Count - 1];
    3525  }
    3526 
    3527  void reserve(size_t newCapacity, bool freeMemory = false)
    3528  {
    3529  newCapacity = VMA_MAX(newCapacity, m_Count);
    3530 
    3531  if((newCapacity < m_Capacity) && !freeMemory)
    3532  {
    3533  newCapacity = m_Capacity;
    3534  }
    3535 
    3536  if(newCapacity != m_Capacity)
    3537  {
    3538  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3539  if(m_Count != 0)
    3540  {
    3541  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3542  }
    3543  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3544  m_Capacity = newCapacity;
    3545  m_pArray = newArray;
    3546  }
    3547  }
    3548 
    3549  void resize(size_t newCount, bool freeMemory = false)
    3550  {
    3551  size_t newCapacity = m_Capacity;
    3552  if(newCount > m_Capacity)
    3553  {
    3554  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3555  }
    3556  else if(freeMemory)
    3557  {
    3558  newCapacity = newCount;
    3559  }
    3560 
    3561  if(newCapacity != m_Capacity)
    3562  {
    3563  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3564  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3565  if(elementsToCopy != 0)
    3566  {
    3567  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3568  }
    3569  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3570  m_Capacity = newCapacity;
    3571  m_pArray = newArray;
    3572  }
    3573 
    3574  m_Count = newCount;
    3575  }
    3576 
    3577  void clear(bool freeMemory = false)
    3578  {
    3579  resize(0, freeMemory);
    3580  }
    3581 
    3582  void insert(size_t index, const T& src)
    3583  {
    3584  VMA_HEAVY_ASSERT(index <= m_Count);
    3585  const size_t oldCount = size();
    3586  resize(oldCount + 1);
    3587  if(index < oldCount)
    3588  {
    3589  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3590  }
    3591  m_pArray[index] = src;
    3592  }
    3593 
    3594  void remove(size_t index)
    3595  {
    3596  VMA_HEAVY_ASSERT(index < m_Count);
    3597  const size_t oldCount = size();
    3598  if(index < oldCount - 1)
    3599  {
    3600  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3601  }
    3602  resize(oldCount - 1);
    3603  }
    3604 
    3605  void push_back(const T& src)
    3606  {
    3607  const size_t newIndex = size();
    3608  resize(newIndex + 1);
    3609  m_pArray[newIndex] = src;
    3610  }
    3611 
    3612  void pop_back()
    3613  {
    3614  VMA_HEAVY_ASSERT(m_Count > 0);
    3615  resize(size() - 1);
    3616  }
    3617 
    3618  void push_front(const T& src)
    3619  {
    3620  insert(0, src);
    3621  }
    3622 
    3623  void pop_front()
    3624  {
    3625  VMA_HEAVY_ASSERT(m_Count > 0);
    3626  remove(0);
    3627  }
    3628 
    3629  typedef T* iterator;
    3630 
    3631  iterator begin() { return m_pArray; }
    3632  iterator end() { return m_pArray + m_Count; }
    3633 
    3634 private:
    3635  AllocatorT m_Allocator;
    3636  T* m_pArray;
    3637  size_t m_Count;
    3638  size_t m_Capacity;
    3639 };
    3640 
// Inserts item into vec at position index (VmaVector flavor; mirrors the
// std::vector overload above so callers work with either container).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3646 
// Removes the element at position index (VmaVector flavor; mirrors the
// std::vector overload above).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3652 
    3653 #endif // #if VMA_USE_STL_VECTOR
    3654 
// Inserts value into a vector kept sorted according to CmpLess, preserving
// the ordering (binary search for position, then shifting insert).
// Returns the index at which the value was inserted.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
    3666 
// Removes the first element equivalent to value from a sorted vector.
// Equivalence is tested the STL way: !cmp(a,b) && !cmp(b,a).
// Returns true if an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
    3684 
// Binary-searches [beg, end) (sorted by CmpLess) for an element equivalent
// to value. Returns an iterator to it, or end if not found.
// NOTE: when it == end the short-circuit also returns it (== end), so the
// "not found" result is end in both branches.
template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}
    3698 
    3700 // class VmaPoolAllocator
    3701 
    3702 /*
    3703 Allocator for objects of type T using a list of arrays (pools) to speed up
    3704 allocation. Number of elements that can be allocated is not bounded because
    3705 allocator can create multiple blocks.
    3706 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Invalidates every pointer previously returned by Alloc.
    void Clear();
    // Returns a free slot, creating a new block if all existing ones are full.
    T* Alloc();
    // Returns a slot obtained from Alloc back to its block's free list.
    void Free(T* ptr);

private:
    // Each slot is either live (Value) or a node in the block's free list
    // (NextFreeIndex). The union makes free-list bookkeeping storage-free.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX means the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3737 
// Constructs an empty pool allocator; blocks are created lazily on first Alloc.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3746 
// Releases all blocks (outstanding Alloc'd pointers become invalid).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3752 
    3753 template<typename T>
    3754 void VmaPoolAllocator<T>::Clear()
    3755 {
    3756  for(size_t i = m_ItemBlocks.size(); i--; )
    3757  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3758  m_ItemBlocks.clear();
    3759 }
    3760 
    3761 template<typename T>
    3762 T* VmaPoolAllocator<T>::Alloc()
    3763 {
    3764  for(size_t i = m_ItemBlocks.size(); i--; )
    3765  {
    3766  ItemBlock& block = m_ItemBlocks[i];
    3767  // This block has some free items: Use first one.
    3768  if(block.FirstFreeIndex != UINT32_MAX)
    3769  {
    3770  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3771  block.FirstFreeIndex = pItem->NextFreeIndex;
    3772  return &pItem->Value;
    3773  }
    3774  }
    3775 
    3776  // No block has free item: Create new one and use it.
    3777  ItemBlock& newBlock = CreateNewBlock();
    3778  Item* const pItem = &newBlock.pItems[0];
    3779  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3780  return &pItem->Value;
    3781 }
    3782 
// Returns a slot obtained from Alloc back to the free list of the block that
// owns it. Asserts if ptr was not allocated from this pool.
// NOTE(review): ~T() is not invoked here; callers appear to treat T as
// POD-like — confirm before storing non-trivial types.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union. memcpy is used instead of a cast to avoid
        // type-punning issues when reinterpreting T* as Item*.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto this block's singly-linked free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    3806 
// Allocates a fresh block of m_ItemsPerBlock slots, threads them all onto the
// block's free list, and returns a reference to the stored block.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Writing through the local copy is fine: the pushed copy shares pItems.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
    3821 
    3823 // class VmaRawList, VmaList
    3824 
    3825 #if VMA_USE_STL_LIST
    3826 
    3827 #define VmaList std::list
    3828 
    3829 #else // #if VMA_USE_STL_LIST
    3830 
// Node of the doubly linked list VmaRawList: value plus prev/next links.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    3838 
    3839 // Doubly linked list.
// Doubly linked list operating directly on VmaListItem nodes, with node
// storage served by a VmaPoolAllocator. VmaList below wraps it with an
// STL-like iterator interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push/insert overloads without a value leave Value uninitialized for the
    // caller to fill in.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    3883 
// Constructs an empty list; node pool allocates 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3893 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's own destructor releases all node storage at once.
}
    3900 
// Returns every node to the pool allocator (walking back-to-front) and resets
// the list to empty.
template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            // Save pPrev before freeing: Free may overwrite node contents.
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}
    3918 
// Appends a new node at the back and returns it; Value is left uninitialized.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
    3940 
// Prepends a new node at the front and returns it; Value is left uninitialized.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
    3962 
// Appends a new node at the back, copy-assigning value into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    3970 
// Prepends a new node at the front, copy-assigning value into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    3978 
// Removes the back node. List must be non-empty.
// Note: when the list becomes empty m_pBack is set to null; m_pFront is the
// same node and stays linked only through the (now freed) node, matching the
// invariant that an empty list is detected via m_Count.
template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}
    3993 
// Removes the front node. List must be non-empty.
template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}
    4008 
// Unlinks pItem from the list (fixing up neighbors or front/back pointers)
// and returns its node to the pool. pItem must belong to this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        // Removing the front node.
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        // Removing the back node.
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4038 
// Inserts a new (uninitialized-Value) node before pItem.
// A null pItem means insert at the end, i.e. PushBack.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // Inserting before the front node: new node becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
    4064 
// Inserts a new (uninitialized-Value) node after pItem.
// A null pItem means insert at the beginning, i.e. PushFront.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // Inserting after the back node: new node becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4090 
// Inserts value before pItem (null pItem appends at the end).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    4098 
// Inserts value after pItem (null pItem prepends at the front).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4106 
// STL-like wrapper over VmaRawList providing iterator / const_iterator and a
// subset of std::list's interface. end() is represented by a null m_pItem,
// which is why operator-- on end() fetches Back() from the list.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): step to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Implicit conversion from mutable iterator.
        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): step to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4291 
    4292 #endif // #if VMA_USE_STL_LIST
    4293 
    4295 // class VmaMap
    4296 
    4297 // Unused in this version.
    4298 #if 0
    4299 
    4300 #if VMA_USE_STL_UNORDERED_MAP
    4301 
    4302 #define VmaPair std::pair
    4303 
    4304 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4305  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4306 
    4307 #else // #if VMA_USE_STL_UNORDERED_MAP
    4308 
// Minimal std::pair replacement used by VmaMap (dead code: inside #if 0).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4318 
    4319 /* Class compatible with subset of interface of std::unordered_map.
    4320 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4321 */
// Sorted-vector-backed map (dead code: inside #if 0). Iterators are raw
// pointers into the underlying VmaVector, invalidated by insert/erase.
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Kept sorted by key (VmaPairFirstLess) to allow binary search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4341 
    4342 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4343 
// Orders VmaPairs by their first member; the heterogeneous overload lets
// binary search compare a pair directly against a bare key.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4356 
// Inserts pair at its sorted position (by key). Duplicate keys are not
// rejected here; the pair is placed before existing equal keys.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4367 
// Binary-searches for key; returns an iterator to the matching pair, or
// end() when the key is absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4385 
    4386 template<typename KeyT, typename ValueT>
    4387 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4388 {
    4389  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4390 }
    4391 
    4392 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4393 
    4394 #endif // #if 0
    4395 
    4397 
class VmaDeviceMemoryBlock;

// Direction of a host cache maintenance operation on mapped memory.
// NOTE(review): presumably dispatched to vkFlushMappedMemoryRanges /
// vkInvalidateMappedMemoryRanges respectively - confirm at usage sites.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4401 
/*
Represents a single memory allocation. It is either a suballocation of one
VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or a whole dedicated
VkDeviceMemory (ALLOCATION_TYPE_DEDICATED); m_Type selects which member of
the union at the bottom is active.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Set in m_MapCount when the allocation was created persistently mapped.
    // The low 7 bits (mask 0x7F) are the reference count of explicit maps.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // m_pUserData points to a string owned by this object
        // (freed via FreeUserDataString - see destructor assert).
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,      // Freshly constructed; no Init* called yet.
        ALLOCATION_TYPE_BLOCK,     // Suballocation of a block (m_BlockAllocation active).
        ALLOCATION_TYPE_DEDICATED, // Owns its VkDeviceMemory (m_DedicatedAllocation active).
    };

    // Starts as ALLOCATION_TYPE_NONE; exactly one of InitBlockAllocation(),
    // InitDedicatedAllocation() or InitLost() must follow before use.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this object as a live suballocation of `block`.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an allocation that is born lost: block-type
    // but with a null block. Requires m_LastUseFrameIndex to already be
    // VMA_FRAME_INDEX_LOST (checked below).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Retargets a block-type allocation to another block/offset.
    // NOTE(review): presumably used by defragmentation - confirm at call sites.
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block-type allocations (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index. Uses compare_exchange_weak,
    // which may fail spuriously (updating `expected`), so callers should loop.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were a whole block holding
    // a single used range and no free space.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX; // No unused range: min stays at sentinel.
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap entry points, split by allocation type.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be called at most once (asserted), after the usage becomes known.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type (see ALLOCATION_TYPE comments).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4618 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;       // Offset of the region from the start of the block, in bytes.
    VkDeviceSize size;         // Size of the region in bytes.
    VmaAllocation hAllocation; // Owning allocation; null for free ranges (see null-item bookkeeping in VmaBlockMetadata_Linear below).
    VmaSuballocationType type; // VMA_SUBALLOCATION_TYPE_* - free vs. kind of resource occupying the region.
};
    4630 
    4631 // Comparator for offsets.
    4632 struct VmaSuballocationOffsetLess
    4633 {
    4634  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4635  {
    4636  return lhs.offset < rhs.offset;
    4637  }
    4638 };
    4639 struct VmaSuballocationOffsetGreater
    4640 {
    4641  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4642  {
    4643  return lhs.offset > rhs.offset;
    4644  }
    4645 };
    4646 
// List of suballocations covering one block (used e.g. as
// VmaBlockMetadata_Generic::m_Suballocations below).
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
// Used as the per-lost-allocation penalty in VmaAllocationRequest::CalcCost().
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4651 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Opaque payload for the metadata algorithm that produced the request - TODO confirm per-algorithm meaning.

    // Heuristic cost of satisfying this request: bytes that would be lost plus
    // a fixed penalty per lost allocation. Lower is better when comparing
    // candidate requests.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4679 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete algorithms are VmaBlockMetadata_Generic,
VmaBlockMetadata_Linear and VmaBlockMetadata_Buddy below.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Must be called right after construction, before any other use.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations indicated in pAllocationRequest
    // (see VmaAllocationRequest above); returns whether it succeeded.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Returns a count - presumably the number of allocations made lost; confirm
    // against the subclass implementations.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // Checks mapped block data for corruption (cf. Write/ValidateMagicValueAroundAllocation
    // in VmaDeviceMemoryBlock).
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for subclasses to emit the common JSON structure of PrintDetailedMap().
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size; // Total size of the block; set once in Init().
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4765 
// Checks `cond`; on failure asserts and makes the enclosing function return
// false. Only usable inside bool-returning functions (the Validate() methods).
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4770 
/*
General-purpose metadata algorithm: keeps every region of the block (used and
free) in m_Suballocations, with the larger free regions additionally indexed
by size in m_FreeSuballocationsBySize for fit searches.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Used = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    uint32_t m_FreeCount;       // Number of FREE suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Total bytes across all FREE suballocations.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4861 
    4862 /*
    4863 Allocations and their references in internal data structure look like this:
    4864 
    4865 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4866 
    4867  0 +-------+
    4868  | |
    4869  | |
    4870  | |
    4871  +-------+
    4872  | Alloc | 1st[m_1stNullItemsBeginCount]
    4873  +-------+
    4874  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4875  +-------+
    4876  | ... |
    4877  +-------+
    4878  | Alloc | 1st[1st.size() - 1]
    4879  +-------+
    4880  | |
    4881  | |
    4882  | |
    4883 GetSize() +-------+
    4884 
    4885 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4886 
    4887  0 +-------+
    4888  | Alloc | 2nd[0]
    4889  +-------+
    4890  | Alloc | 2nd[1]
    4891  +-------+
    4892  | ... |
    4893  +-------+
    4894  | Alloc | 2nd[2nd.size() - 1]
    4895  +-------+
    4896  | |
    4897  | |
    4898  | |
    4899  +-------+
    4900  | Alloc | 1st[m_1stNullItemsBeginCount]
    4901  +-------+
    4902  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4903  +-------+
    4904  | ... |
    4905  +-------+
    4906  | Alloc | 1st[1st.size() - 1]
    4907  +-------+
    4908  | |
    4909 GetSize() +-------+
    4910 
    4911 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4912 
    4913  0 +-------+
    4914  | |
    4915  | |
    4916  | |
    4917  +-------+
    4918  | Alloc | 1st[m_1stNullItemsBeginCount]
    4919  +-------+
    4920  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4921  +-------+
    4922  | ... |
    4923  +-------+
    4924  | Alloc | 1st[1st.size() - 1]
    4925  +-------+
    4926  | |
    4927  | |
    4928  | |
    4929  +-------+
    4930  | Alloc | 2nd[2nd.size() - 1]
    4931  +-------+
    4932  | ... |
    4933  +-------+
    4934  | Alloc | 2nd[1]
    4935  +-------+
    4936  | Alloc | 2nd[0]
    4937 GetSize() +-------+
    4938 
    4939 */
/*
Linear metadata algorithm: supports linear, ring-buffer and double-stack usage
of the block. See the large diagram above for how the 1st and 2nd
suballocation vectors map onto the block's address range.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex; // Selects which of the two vectors is currently "1st".
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5038 
    5039 /*
    5040 - GetSize() is the original size of allocated memory block.
    5041 - m_UsableSize is this size aligned down to a power of two.
    5042  All allocations and calculations happen relative to m_UsableSize.
    5043 - GetUnusableSize() is the difference between them.
 It is reported as a separate, unused range, not available for allocations.
    5045 
    5046 Node at level 0 has size = m_UsableSize.
    5047 Each next level contains nodes with size 2 times smaller than current level.
    5048 m_LevelCount is the maximum number of levels to use in the current object.
    5049 */
// Buddy metadata algorithm: a binary tree of power-of-two nodes over
// m_UsableSize (see the explanatory comment above this class).
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Unusable tail counts as free for reporting purposes.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    // Both public Free variants funnel into the private FreeAtOffset overload.
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32; // Nodes are never split below this size.
    static const size_t MAX_LEVELS = 30;          // Upper bound for m_LevelCount.

    // Accumulators for Validate()'s tree walk, compared against the cached
    // counters afterwards.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree; the union member in use depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy; // The sibling node covering the adjacent half of the parent.

        union
        {
            // TYPE_FREE: links in the per-level free list (see m_FreeList).
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // TYPE_SPLIT: right child is leftChild->buddy.
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked list of TYPE_FREE nodes per level, threaded through Node::free.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Level 0 spans the whole usable size; each level halves the node size.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5186 
    5187 /*
    5188 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5189 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5190 
    5191 Thread-safety: This class must be externally synchronized.
    5192 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block. Concrete metadata
    // algorithm is chosen in Init() based on the `algorithm` parameter.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // Destroy() must already have been called (see comment at Destroy below):
        // by this point the block must be unmapped and its memory released.
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory `count` times (ref-counted via m_MapCount).
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/verify corruption-detection markers around a suballocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    // Map() reference count; 0 means the block is currently unmapped.
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5255 
    5256 struct VmaPointerLess
    5257 {
    5258  bool operator()(const void* lhs, const void* rhs) const
    5259  {
    5260  return lhs < rhs;
    5261  }
    5262 };
    5263 
    5264 class VmaDefragmentator;
    5265 
    5266 /*
    5267 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5268 Vulkan memory type.
    5269 
    5270 Synchronized internally with a mutex.
    5271 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount empty blocks so the minimum is always met.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Tries to suballocate from an existing block, creating a new one if needed.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates (or returns the existing) defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one block that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. This flag tracks whether such a block exists. */
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related mutable state ("synchronized internally").
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5384 
// Custom memory pool: a thin wrapper owning one VmaBlockVector plus an id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned exactly once (asserts it was previously 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5407 
// Moves allocations between blocks of one VmaBlockVector to reduce
// fragmentation, within the byte/count budget passed to Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals reported via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, plus an optional
    // caller-provided flag to be set when the allocation is moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations by size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when it contains more
        // allocations than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): "Descecnding" is a typo kept for source compatibility —
        // renaming would break any other call sites in the file.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous and homogeneous orderings by underlying block address,
    // for binary search of BlockInfo by VmaDeviceMemoryBlock pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation to be considered for moving.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5537 
    5538 #if VMA_RECORDING_ENABLED
    5539 
// Records every allocator API call into a text file (CSV-like trace) so a
// session can be replayed/analyzed later. Compiled only when
// VMA_RECORDING_ENABLED (see surrounding #if).
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header block describing the device/environment into the file.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per public allocator entry point; each appends a
    // line tagged with the current frame index.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Identifies the calling thread and timestamp for each recorded entry.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats allocation user data (string or pointer) for output.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        // Buffer for the pointer-as-hex representation; m_Str points either
        // here or at the user-provided string.
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;
    // High-resolution timer frequency and start value for timestamps.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5635 
    5636 #endif // #if VMA_RECORDING_ENABLED
    5637 
    5638 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of the limit, or VK_WHOLE_SIZE if there is no limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools - one block vector per memory type.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Dedicated (non-pooled) allocations, per memory type.
    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user callbacks if specified at creation, else null (use defaults).
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device granularity, clamped up by the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Thin wrappers over vkAllocateMemory/vkFreeMemory that also maintain
    // heap size limits and invoke user device-memory callbacks.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Frees the given allocation as Dedicated Memory.
    // (NOTE(review): older comment said "Returns true if found and freed",
    // but the function returns void — the comment predated the signature.)
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5835 
    5837 // Memory allocation #2 after VmaAllocator_T definition
    5838 
// Allocates raw host memory through the allocator's allocation callbacks.
// Forwards to the callback-based VmaMalloc overload defined earlier in the file.
static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
{
    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
}
    5843 
// Frees host memory previously obtained from VmaMalloc(hAllocator, ...).
// Forwards to the callback-based VmaFree overload defined earlier in the file.
static void VmaFree(VmaAllocator hAllocator, void* ptr)
{
    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
}
    5848 
// Allocates (but does not construct) storage for a single T with proper alignment.
template<typename T>
static T* VmaAllocate(VmaAllocator hAllocator)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
}
    5854 
// Allocates (but does not construct) storage for `count` contiguous Ts.
template<typename T>
static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
{
    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    5860 
    5861 template<typename T>
    5862 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5863 {
    5864  if(ptr != VMA_NULL)
    5865  {
    5866  ptr->~T();
    5867  VmaFree(hAllocator, ptr);
    5868  }
    5869 }
    5870 
    5871 template<typename T>
    5872 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5873 {
    5874  if(ptr != VMA_NULL)
    5875  {
    5876  for(size_t i = count; i--; )
    5877  ptr[i].~T();
    5878  VmaFree(hAllocator, ptr);
    5879  }
    5880 }
    5881 
    5883 // VmaStringBuilder
    5884 
    5885 #if VMA_STATS_STRING_ENABLED
    5886 
// Minimal append-only string buffer built on VmaVector, used when composing
// statistics strings. Note: the buffer is NOT null-terminated; use GetLength().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    // Appends the decimal representation of the number.
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    // Appends the pointer's textual representation.
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5904 
    5905 void VmaStringBuilder::Add(const char* pStr)
    5906 {
    5907  const size_t strLen = strlen(pStr);
    5908  if(strLen > 0)
    5909  {
    5910  const size_t oldCount = m_Data.size();
    5911  m_Data.resize(oldCount + strLen);
    5912  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5913  }
    5914 }
    5915 
    5916 void VmaStringBuilder::AddNumber(uint32_t num)
    5917 {
    5918  char buf[11];
    5919  VmaUint32ToStr(buf, sizeof(buf), num);
    5920  Add(buf);
    5921 }
    5922 
    5923 void VmaStringBuilder::AddNumber(uint64_t num)
    5924 {
    5925  char buf[21];
    5926  VmaUint64ToStr(buf, sizeof(buf), num);
    5927  Add(buf);
    5928 }
    5929 
    5930 void VmaStringBuilder::AddPointer(const void* ptr)
    5931 {
    5932  char buf[21];
    5933  VmaPtrToStr(buf, sizeof(buf), ptr);
    5934  Add(buf);
    5935 }
    5936 
    5937 #endif // #if VMA_STATS_STRING_ENABLED
    5938 
    5940 // VmaJsonWriter
    5941 
    5942 #if VMA_STATS_STRING_ENABLED
    5943 
// Streaming JSON writer emitting into an externally-owned VmaStringBuilder.
// Tracks nesting with an explicit stack; asserts on misuse (e.g. unbalanced
// Begin/End calls, or non-string writes while inside a string).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Object/array scopes. singleLine suppresses per-element line breaks.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Complete string value in one call (quoted and escaped).
    void WriteString(const char* pStr);
    // Incremental string building: BeginString, any ContinueString* calls,
    // then EndString. pStr parts are optional convenience arguments.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    // Bare (non-string) values.
    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values written so far in this collection (for commas/keys).
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    5992 
// Indentation unit; presumably repeated once per nesting level by WriteIndent()
// (body not in view). NOTE(review): a single space — confirm this matches the
// intended pretty-print width.
const char* const VmaJsonWriter::INDENT = " ";
    5994 
// Binds the writer to an externally-owned string builder; the nesting stack
// allocates through the caller-provided Vulkan allocation callbacks.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6001 
VmaJsonWriter::~VmaJsonWriter()
{
    // A well-formed document must close every string and every open
    // object/array before the writer is destroyed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6007 
    6008 void VmaJsonWriter::BeginObject(bool singleLine)
    6009 {
    6010  VMA_ASSERT(!m_InsideString);
    6011 
    6012  BeginValue(false);
    6013  m_SB.Add('{');
    6014 
    6015  StackItem item;
    6016  item.type = COLLECTION_TYPE_OBJECT;
    6017  item.valueCount = 0;
    6018  item.singleLineMode = singleLine;
    6019  m_Stack.push_back(item);
    6020 }
    6021 
    6022 void VmaJsonWriter::EndObject()
    6023 {
    6024  VMA_ASSERT(!m_InsideString);
    6025 
    6026  WriteIndent(true);
    6027  m_SB.Add('}');
    6028 
    6029  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6030  m_Stack.pop_back();
    6031 }
    6032 
    6033 void VmaJsonWriter::BeginArray(bool singleLine)
    6034 {
    6035  VMA_ASSERT(!m_InsideString);
    6036 
    6037  BeginValue(false);
    6038  m_SB.Add('[');
    6039 
    6040  StackItem item;
    6041  item.type = COLLECTION_TYPE_ARRAY;
    6042  item.valueCount = 0;
    6043  item.singleLineMode = singleLine;
    6044  m_Stack.push_back(item);
    6045 }
    6046 
    6047 void VmaJsonWriter::EndArray()
    6048 {
    6049  VMA_ASSERT(!m_InsideString);
    6050 
    6051  WriteIndent(true);
    6052  m_SB.Add(']');
    6053 
    6054  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6055  m_Stack.pop_back();
    6056 }
    6057 
// Convenience: writes a complete quoted-and-escaped JSON string value.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6063 
    6064 void VmaJsonWriter::BeginString(const char* pStr)
    6065 {
    6066  VMA_ASSERT(!m_InsideString);
    6067 
    6068  BeginValue(true);
    6069  m_SB.Add('"');
    6070  m_InsideString = true;
    6071  if(pStr != VMA_NULL && pStr[0] != '\0')
    6072  {
    6073  ContinueString(pStr);
    6074  }
    6075 }
    6076 
    6077 void VmaJsonWriter::ContinueString(const char* pStr)
    6078 {
    6079  VMA_ASSERT(m_InsideString);
    6080 
    6081  const size_t strLen = strlen(pStr);
    6082  for(size_t i = 0; i < strLen; ++i)
    6083  {
    6084  char ch = pStr[i];
    6085  if(ch == '\\')
    6086  {
    6087  m_SB.Add("\\\\");
    6088  }
    6089  else if(ch == '"')
    6090  {
    6091  m_SB.Add("\\\"");
    6092  }
    6093  else if(ch >= 32)
    6094  {
    6095  m_SB.Add(ch);
    6096  }
    6097  else switch(ch)
    6098  {
    6099  case '\b':
    6100  m_SB.Add("\\b");
    6101  break;
    6102  case '\f':
    6103  m_SB.Add("\\f");
    6104  break;
    6105  case '\n':
    6106  m_SB.Add("\\n");
    6107  break;
    6108  case '\r':
    6109  m_SB.Add("\\r");
    6110  break;
    6111  case '\t':
    6112  m_SB.Add("\\t");
    6113  break;
    6114  default:
    6115  VMA_ASSERT(0 && "Character not currently supported.");
    6116  break;
    6117  }
    6118  }
    6119 }
    6120 
// Appends the decimal representation of n to the string currently being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6126 
// Appends the decimal representation of n to the string currently being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6132 
// Appends the textual representation of the pointer value to the string
// currently being written (formatting is delegated to the string builder).
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6138 
    6139 void VmaJsonWriter::EndString(const char* pStr)
    6140 {
    6141  VMA_ASSERT(m_InsideString);
    6142  if(pStr != VMA_NULL && pStr[0] != '\0')
    6143  {
    6144  ContinueString(pStr);
    6145  }
    6146  m_SB.Add('"');
    6147  m_InsideString = false;
    6148 }
    6149 
// Writes n as a standalone JSON number value (with any needed separator/indent).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6156 
// Writes n as a standalone JSON number value (with any needed separator/indent).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6163 
    6164 void VmaJsonWriter::WriteBool(bool b)
    6165 {
    6166  VMA_ASSERT(!m_InsideString);
    6167  BeginValue(false);
    6168  m_SB.Add(b ? "true" : "false");
    6169 }
    6170 
// Writes the JSON literal "null" as a standalone value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6177 
    6178 void VmaJsonWriter::BeginValue(bool isString)
    6179 {
    6180  if(!m_Stack.empty())
    6181  {
    6182  StackItem& currItem = m_Stack.back();
    6183  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6184  currItem.valueCount % 2 == 0)
    6185  {
    6186  VMA_ASSERT(isString);
    6187  }
    6188 
    6189  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6190  currItem.valueCount % 2 != 0)
    6191  {
    6192  m_SB.Add(": ");
    6193  }
    6194  else if(currItem.valueCount > 0)
    6195  {
    6196  m_SB.Add(", ");
    6197  WriteIndent();
    6198  }
    6199  else
    6200  {
    6201  WriteIndent();
    6202  }
    6203  ++currItem.valueCount;
    6204  }
    6205 }
    6206 
    6207 void VmaJsonWriter::WriteIndent(bool oneLess)
    6208 {
    6209  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6210  {
    6211  m_SB.AddNewLine();
    6212 
    6213  size_t count = m_Stack.size();
    6214  if(count > 0 && oneLess)
    6215  {
    6216  --count;
    6217  }
    6218  for(size_t i = 0; i < count; ++i)
    6219  {
    6220  m_SB.Add(INDENT);
    6221  }
    6222  }
    6223 }
    6224 
    6225 #endif // #if VMA_STATS_STRING_ENABLED
    6226 
    6228 
    6229 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6230 {
    6231  if(IsUserDataString())
    6232  {
    6233  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6234 
    6235  FreeUserDataString(hAllocator);
    6236 
    6237  if(pUserData != VMA_NULL)
    6238  {
    6239  const char* const newStrSrc = (char*)pUserData;
    6240  const size_t newStrLen = strlen(newStrSrc);
    6241  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6242  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6243  m_pUserData = newStrDst;
    6244  }
    6245  }
    6246  else
    6247  {
    6248  m_pUserData = pUserData;
    6249  }
    6250 }
    6251 
    6252 void VmaAllocation_T::ChangeBlockAllocation(
    6253  VmaAllocator hAllocator,
    6254  VmaDeviceMemoryBlock* block,
    6255  VkDeviceSize offset)
    6256 {
    6257  VMA_ASSERT(block != VMA_NULL);
    6258  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6259 
    6260  // Move mapping reference counter from old block to new block.
    6261  if(block != m_BlockAllocation.m_Block)
    6262  {
    6263  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    6264  if(IsPersistentMap())
    6265  ++mapRefCount;
    6266  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    6267  block->Map(hAllocator, mapRefCount, VMA_NULL);
    6268  }
    6269 
    6270  m_BlockAllocation.m_Block = block;
    6271  m_BlockAllocation.m_Offset = offset;
    6272 }
    6273 
    6274 VkDeviceSize VmaAllocation_T::GetOffset() const
    6275 {
    6276  switch(m_Type)
    6277  {
    6278  case ALLOCATION_TYPE_BLOCK:
    6279  return m_BlockAllocation.m_Offset;
    6280  case ALLOCATION_TYPE_DEDICATED:
    6281  return 0;
    6282  default:
    6283  VMA_ASSERT(0);
    6284  return 0;
    6285  }
    6286 }
    6287 
    6288 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6289 {
    6290  switch(m_Type)
    6291  {
    6292  case ALLOCATION_TYPE_BLOCK:
    6293  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6294  case ALLOCATION_TYPE_DEDICATED:
    6295  return m_DedicatedAllocation.m_hMemory;
    6296  default:
    6297  VMA_ASSERT(0);
    6298  return VK_NULL_HANDLE;
    6299  }
    6300 }
    6301 
    6302 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6303 {
    6304  switch(m_Type)
    6305  {
    6306  case ALLOCATION_TYPE_BLOCK:
    6307  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6308  case ALLOCATION_TYPE_DEDICATED:
    6309  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6310  default:
    6311  VMA_ASSERT(0);
    6312  return UINT32_MAX;
    6313  }
    6314 }
    6315 
    6316 void* VmaAllocation_T::GetMappedData() const
    6317 {
    6318  switch(m_Type)
    6319  {
    6320  case ALLOCATION_TYPE_BLOCK:
    6321  if(m_MapCount != 0)
    6322  {
    6323  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6324  VMA_ASSERT(pBlockData != VMA_NULL);
    6325  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6326  }
    6327  else
    6328  {
    6329  return VMA_NULL;
    6330  }
    6331  break;
    6332  case ALLOCATION_TYPE_DEDICATED:
    6333  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6334  return m_DedicatedAllocation.m_pMappedData;
    6335  default:
    6336  VMA_ASSERT(0);
    6337  return VMA_NULL;
    6338  }
    6339 }
    6340 
    6341 bool VmaAllocation_T::CanBecomeLost() const
    6342 {
    6343  switch(m_Type)
    6344  {
    6345  case ALLOCATION_TYPE_BLOCK:
    6346  return m_BlockAllocation.m_CanBecomeLost;
    6347  case ALLOCATION_TYPE_DEDICATED:
    6348  return false;
    6349  default:
    6350  VMA_ASSERT(0);
    6351  return false;
    6352  }
    6353 }
    6354 
// Returns the pool handle stored with this allocation.
// Valid only for block allocations (enforced by the assert below).
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6360 
// Attempts to mark this allocation as lost. Returns true on success.
// Fails (returns false) if the allocation was used within the last
// frameInUseCount frames relative to currentFrameIndex.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Retry loop around a compare-exchange on the last-use frame index,
    // so concurrent updates to it are handled without locking.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — this should not be reached again for the same allocation.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still inside the in-use window: cannot be made lost now.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    6392 
    6393 #if VMA_STATS_STRING_ENABLED
    6394 
// Correspond to values of enum VmaSuballocationType.
// Indexed by that enum in PrintParameters() and PrintDetailedMap_UnusedRange()
// when writing JSON statistics, so the order here must match the enum.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6404 
    6405 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    6406 {
    6407  json.WriteString("Type");
    6408  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    6409 
    6410  json.WriteString("Size");
    6411  json.WriteNumber(m_Size);
    6412 
    6413  if(m_pUserData != VMA_NULL)
    6414  {
    6415  json.WriteString("UserData");
    6416  if(IsUserDataString())
    6417  {
    6418  json.WriteString((const char*)m_pUserData);
    6419  }
    6420  else
    6421  {
    6422  json.BeginString();
    6423  json.ContinueString_Pointer(m_pUserData);
    6424  json.EndString();
    6425  }
    6426  }
    6427 
    6428  json.WriteString("CreationFrameIndex");
    6429  json.WriteNumber(m_CreationFrameIndex);
    6430 
    6431  json.WriteString("LastUseFrameIndex");
    6432  json.WriteNumber(GetLastUseFrameIndex());
    6433 
    6434  if(m_BufferImageUsage != 0)
    6435  {
    6436  json.WriteString("Usage");
    6437  json.WriteNumber(m_BufferImageUsage);
    6438  }
    6439 }
    6440 
    6441 #endif
    6442 
    6443 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6444 {
    6445  VMA_ASSERT(IsUserDataString());
    6446  if(m_pUserData != VMA_NULL)
    6447  {
    6448  char* const oldStr = (char*)m_pUserData;
    6449  const size_t oldStrLen = strlen(oldStr);
    6450  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6451  m_pUserData = VMA_NULL;
    6452  }
    6453 }
    6454 
    6455 void VmaAllocation_T::BlockAllocMap()
    6456 {
    6457  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6458 
    6459  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6460  {
    6461  ++m_MapCount;
    6462  }
    6463  else
    6464  {
    6465  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6466  }
    6467 }
    6468 
    6469 void VmaAllocation_T::BlockAllocUnmap()
    6470 {
    6471  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6472 
    6473  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6474  {
    6475  --m_MapCount;
    6476  }
    6477  else
    6478  {
    6479  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6480  }
    6481 }
    6482 
    6483 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6484 {
    6485  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6486 
    6487  if(m_MapCount != 0)
    6488  {
    6489  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6490  {
    6491  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6492  *ppData = m_DedicatedAllocation.m_pMappedData;
    6493  ++m_MapCount;
    6494  return VK_SUCCESS;
    6495  }
    6496  else
    6497  {
    6498  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6499  return VK_ERROR_MEMORY_MAP_FAILED;
    6500  }
    6501  }
    6502  else
    6503  {
    6504  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6505  hAllocator->m_hDevice,
    6506  m_DedicatedAllocation.m_hMemory,
    6507  0, // offset
    6508  VK_WHOLE_SIZE,
    6509  0, // flags
    6510  ppData);
    6511  if(result == VK_SUCCESS)
    6512  {
    6513  m_DedicatedAllocation.m_pMappedData = *ppData;
    6514  m_MapCount = 1;
    6515  }
    6516  return result;
    6517  }
    6518 }
    6519 
    6520 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6521 {
    6522  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6523 
    6524  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6525  {
    6526  --m_MapCount;
    6527  if(m_MapCount == 0)
    6528  {
    6529  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6530  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6531  hAllocator->m_hDevice,
    6532  m_DedicatedAllocation.m_hMemory);
    6533  }
    6534  }
    6535  else
    6536  {
    6537  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6538  }
    6539 }
    6540 
    6541 #if VMA_STATS_STRING_ENABLED
    6542 
    6543 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    6544 {
    6545  json.BeginObject();
    6546 
    6547  json.WriteString("Blocks");
    6548  json.WriteNumber(stat.blockCount);
    6549 
    6550  json.WriteString("Allocations");
    6551  json.WriteNumber(stat.allocationCount);
    6552 
    6553  json.WriteString("UnusedRanges");
    6554  json.WriteNumber(stat.unusedRangeCount);
    6555 
    6556  json.WriteString("UsedBytes");
    6557  json.WriteNumber(stat.usedBytes);
    6558 
    6559  json.WriteString("UnusedBytes");
    6560  json.WriteNumber(stat.unusedBytes);
    6561 
    6562  if(stat.allocationCount > 1)
    6563  {
    6564  json.WriteString("AllocationSize");
    6565  json.BeginObject(true);
    6566  json.WriteString("Min");
    6567  json.WriteNumber(stat.allocationSizeMin);
    6568  json.WriteString("Avg");
    6569  json.WriteNumber(stat.allocationSizeAvg);
    6570  json.WriteString("Max");
    6571  json.WriteNumber(stat.allocationSizeMax);
    6572  json.EndObject();
    6573  }
    6574 
    6575  if(stat.unusedRangeCount > 1)
    6576  {
    6577  json.WriteString("UnusedRangeSize");
    6578  json.BeginObject(true);
    6579  json.WriteString("Min");
    6580  json.WriteNumber(stat.unusedRangeSizeMin);
    6581  json.WriteString("Avg");
    6582  json.WriteNumber(stat.unusedRangeSizeAvg);
    6583  json.WriteString("Max");
    6584  json.WriteNumber(stat.unusedRangeSizeMax);
    6585  json.EndObject();
    6586  }
    6587 
    6588  json.EndObject();
    6589 }
    6590 
    6591 #endif // #if VMA_STATS_STRING_ENABLED
    6592 
// Comparator ordering suballocation-list iterators by the size of the
// suballocation they point to. The second overload allows comparing an
// iterator directly against a VkDeviceSize, so binary searches over
// m_FreeSuballocationsBySize can take a plain size as the search key.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6608 
    6609 
    6611 // class VmaBlockMetadata
    6612 
// Base-class constructor: size is set later via Init(); allocation
// callbacks are captured from the allocator for derived classes to use.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6618 
    6619 #if VMA_STATS_STRING_ENABLED
    6620 
    6621 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    6622  VkDeviceSize unusedBytes,
    6623  size_t allocationCount,
    6624  size_t unusedRangeCount) const
    6625 {
    6626  json.BeginObject();
    6627 
    6628  json.WriteString("TotalBytes");
    6629  json.WriteNumber(GetSize());
    6630 
    6631  json.WriteString("UnusedBytes");
    6632  json.WriteNumber(unusedBytes);
    6633 
    6634  json.WriteString("Allocations");
    6635  json.WriteNumber((uint64_t)allocationCount);
    6636 
    6637  json.WriteString("UnusedRanges");
    6638  json.WriteNumber((uint64_t)unusedRangeCount);
    6639 
    6640  json.WriteString("Suballocations");
    6641  json.BeginArray();
    6642 }
    6643 
    6644 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    6645  VkDeviceSize offset,
    6646  VmaAllocation hAllocation) const
    6647 {
    6648  json.BeginObject(true);
    6649 
    6650  json.WriteString("Offset");
    6651  json.WriteNumber(offset);
    6652 
    6653  hAllocation->PrintParameters(json);
    6654 
    6655  json.EndObject();
    6656 }
    6657 
    6658 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    6659  VkDeviceSize offset,
    6660  VkDeviceSize size) const
    6661 {
    6662  json.BeginObject(true);
    6663 
    6664  json.WriteString("Offset");
    6665  json.WriteNumber(offset);
    6666 
    6667  json.WriteString("Type");
    6668  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    6669 
    6670  json.WriteString("Size");
    6671  json.WriteNumber(size);
    6672 
    6673  json.EndObject();
    6674 }
    6675 
    6676 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    6677 {
    6678  json.EndArray();
    6679  json.EndObject();
    6680 }
    6681 
    6682 #endif // #if VMA_STATS_STRING_ENABLED
    6683 
    6685 // class VmaBlockMetadata_Generic
    6686 
// Constructs empty metadata; both containers use the allocator's callbacks.
// Actual free/used state is established by Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6695 
// Nothing to release explicitly: members clean up via their own destructors.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6699 
    6700 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6701 {
    6702  VmaBlockMetadata::Init(size);
    6703 
    6704  m_FreeCount = 1;
    6705  m_SumFreeSize = size;
    6706 
    6707  VmaSuballocation suballoc = {};
    6708  suballoc.offset = 0;
    6709  suballoc.size = size;
    6710  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6711  suballoc.hAllocation = VK_NULL_HANDLE;
    6712 
    6713  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6714  m_Suballocations.push_back(suballoc);
    6715  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6716  --suballocItem;
    6717  m_FreeSuballocationsBySize.push_back(suballocItem);
    6718 }
    6719 
// Verifies all internal invariants of this block's metadata:
// - suballocations are contiguous (offsets add up to the block size),
// - no two adjacent free suballocations exist (they should be merged),
// - cached m_FreeCount / m_SumFreeSize match a fresh recount,
// - m_FreeSuballocationsBySize contains exactly the registrable free
//   suballocations, sorted ascending by size.
// Returns true on success; VMA_VALIDATE reports and fails otherwise.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free suballocations carry no allocation handle; used ones must.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation handle must agree with this list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // Entries must be free suballocations sorted ascending by size.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6801 
    6802 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6803 {
    6804  if(!m_FreeSuballocationsBySize.empty())
    6805  {
    6806  return m_FreeSuballocationsBySize.back()->size;
    6807  }
    6808  else
    6809  {
    6810  return 0;
    6811  }
    6812 }
    6813 
    6814 bool VmaBlockMetadata_Generic::IsEmpty() const
    6815 {
    6816  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6817 }
    6818 
    6819 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6820 {
    6821  outInfo.blockCount = 1;
    6822 
    6823  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6824  outInfo.allocationCount = rangeCount - m_FreeCount;
    6825  outInfo.unusedRangeCount = m_FreeCount;
    6826 
    6827  outInfo.unusedBytes = m_SumFreeSize;
    6828  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6829 
    6830  outInfo.allocationSizeMin = UINT64_MAX;
    6831  outInfo.allocationSizeMax = 0;
    6832  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6833  outInfo.unusedRangeSizeMax = 0;
    6834 
    6835  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6836  suballocItem != m_Suballocations.cend();
    6837  ++suballocItem)
    6838  {
    6839  const VmaSuballocation& suballoc = *suballocItem;
    6840  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6841  {
    6842  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6843  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6844  }
    6845  else
    6846  {
    6847  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6848  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6849  }
    6850  }
    6851 }
    6852 
    6853 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6854 {
    6855  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6856 
    6857  inoutStats.size += GetSize();
    6858  inoutStats.unusedSize += m_SumFreeSize;
    6859  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6860  inoutStats.unusedRangeCount += m_FreeCount;
    6861  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6862 }
    6863 
    6864 #if VMA_STATS_STRING_ENABLED
    6865 
    6866 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6867 {
    6868  PrintDetailedMap_Begin(json,
    6869  m_SumFreeSize, // unusedBytes
    6870  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6871  m_FreeCount); // unusedRangeCount
    6872 
    6873  size_t i = 0;
    6874  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6875  suballocItem != m_Suballocations.cend();
    6876  ++suballocItem, ++i)
    6877  {
    6878  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6879  {
    6880  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6881  }
    6882  else
    6883  {
    6884  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6885  }
    6886  }
    6887 
    6888  PrintDetailedMap_End(json);
    6889 }
    6890 
    6891 #endif // #if VMA_STATS_STRING_ENABLED
    6892 
    6893 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6894  uint32_t currentFrameIndex,
    6895  uint32_t frameInUseCount,
    6896  VkDeviceSize bufferImageGranularity,
    6897  VkDeviceSize allocSize,
    6898  VkDeviceSize allocAlignment,
    6899  bool upperAddress,
    6900  VmaSuballocationType allocType,
    6901  bool canMakeOtherLost,
    6902  uint32_t strategy,
    6903  VmaAllocationRequest* pAllocationRequest)
    6904 {
    6905  VMA_ASSERT(allocSize > 0);
    6906  VMA_ASSERT(!upperAddress);
    6907  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6908  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6909  VMA_HEAVY_ASSERT(Validate());
    6910 
    6911  // There is not enough total free space in this block to fullfill the request: Early return.
    6912  if(canMakeOtherLost == false &&
    6913  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6914  {
    6915  return false;
    6916  }
    6917 
    6918  // New algorithm, efficiently searching freeSuballocationsBySize.
    6919  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6920  if(freeSuballocCount > 0)
    6921  {
    6923  {
    6924  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6925  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6926  m_FreeSuballocationsBySize.data(),
    6927  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6928  allocSize + 2 * VMA_DEBUG_MARGIN,
    6929  VmaSuballocationItemSizeLess());
    6930  size_t index = it - m_FreeSuballocationsBySize.data();
    6931  for(; index < freeSuballocCount; ++index)
    6932  {
    6933  if(CheckAllocation(
    6934  currentFrameIndex,
    6935  frameInUseCount,
    6936  bufferImageGranularity,
    6937  allocSize,
    6938  allocAlignment,
    6939  allocType,
    6940  m_FreeSuballocationsBySize[index],
    6941  false, // canMakeOtherLost
    6942  &pAllocationRequest->offset,
    6943  &pAllocationRequest->itemsToMakeLostCount,
    6944  &pAllocationRequest->sumFreeSize,
    6945  &pAllocationRequest->sumItemSize))
    6946  {
    6947  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6948  return true;
    6949  }
    6950  }
    6951  }
    6952  else // WORST_FIT, FIRST_FIT
    6953  {
    6954  // Search staring from biggest suballocations.
    6955  for(size_t index = freeSuballocCount; index--; )
    6956  {
    6957  if(CheckAllocation(
    6958  currentFrameIndex,
    6959  frameInUseCount,
    6960  bufferImageGranularity,
    6961  allocSize,
    6962  allocAlignment,
    6963  allocType,
    6964  m_FreeSuballocationsBySize[index],
    6965  false, // canMakeOtherLost
    6966  &pAllocationRequest->offset,
    6967  &pAllocationRequest->itemsToMakeLostCount,
    6968  &pAllocationRequest->sumFreeSize,
    6969  &pAllocationRequest->sumItemSize))
    6970  {
    6971  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6972  return true;
    6973  }
    6974  }
    6975  }
    6976  }
    6977 
    6978  if(canMakeOtherLost)
    6979  {
    6980  // Brute-force algorithm. TODO: Come up with something better.
    6981 
    6982  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6983  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6984 
    6985  VmaAllocationRequest tmpAllocRequest = {};
    6986  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    6987  suballocIt != m_Suballocations.end();
    6988  ++suballocIt)
    6989  {
    6990  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    6991  suballocIt->hAllocation->CanBecomeLost())
    6992  {
    6993  if(CheckAllocation(
    6994  currentFrameIndex,
    6995  frameInUseCount,
    6996  bufferImageGranularity,
    6997  allocSize,
    6998  allocAlignment,
    6999  allocType,
    7000  suballocIt,
    7001  canMakeOtherLost,
    7002  &tmpAllocRequest.offset,
    7003  &tmpAllocRequest.itemsToMakeLostCount,
    7004  &tmpAllocRequest.sumFreeSize,
    7005  &tmpAllocRequest.sumItemSize))
    7006  {
    7007  tmpAllocRequest.item = suballocIt;
    7008 
    7009  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7011  {
    7012  *pAllocationRequest = tmpAllocRequest;
    7013  }
    7014  }
    7015  }
    7016  }
    7017 
    7018  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7019  {
    7020  return true;
    7021  }
    7022  }
    7023 
    7024  return false;
    7025 }
    7026 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// starting at pAllocationRequest->item and walking forward. Each freed
// allocation is merged into the free list via FreeSuballocation, and the
// request's item iterator is kept pointing at the resulting free range.
// Returns false if any allocation refuses to become lost (still in use).
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free suballocation to reach the next used one.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge neighbors; it returns the iterator
            // of the merged free range, which stays our current position.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            // Allocation is still within its frame-in-use window.
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7058 
    7059 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7060 {
    7061  uint32_t lostAllocationCount = 0;
    7062  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7063  it != m_Suballocations.end();
    7064  ++it)
    7065  {
    7066  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7067  it->hAllocation->CanBecomeLost() &&
    7068  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7069  {
    7070  it = FreeSuballocation(it);
    7071  ++lostAllocationCount;
    7072  }
    7073  }
    7074  return lostAllocationCount;
    7075 }
    7076 
    7077 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7078 {
    7079  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7080  it != m_Suballocations.end();
    7081  ++it)
    7082  {
    7083  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7084  {
    7085  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7086  {
    7087  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7088  return VK_ERROR_VALIDATION_FAILED_EXT;
    7089  }
    7090  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7091  {
    7092  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7093  return VK_ERROR_VALIDATION_FAILED_EXT;
    7094  }
    7095  }
    7096  }
    7097 
    7098  return VK_SUCCESS;
    7099 }
    7100 
// Commits a previously validated allocation request: converts the free
// suballocation pointed to by request.item into a used one of allocSize bytes
// at request.offset, re-inserts any leftover space before/after it as new free
// suballocations, and updates m_FreeCount / m_SumFreeSize.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Upper-address (allocate-from-top) mode is not supported by this algorithm.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen before mutating its size below.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the consumed free block is gone (-1); each inserted padding
    // block adds one free item back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // Only allocSize bytes become used; the paddings remain counted as free.
    m_SumFreeSize -= allocSize;
}
    7166 
    7167 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7168 {
    7169  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7170  suballocItem != m_Suballocations.end();
    7171  ++suballocItem)
    7172  {
    7173  VmaSuballocation& suballoc = *suballocItem;
    7174  if(suballoc.hAllocation == allocation)
    7175  {
    7176  FreeSuballocation(suballocItem);
    7177  VMA_HEAVY_ASSERT(Validate());
    7178  return;
    7179  }
    7180  }
    7181  VMA_ASSERT(0 && "Not found!");
    7182 }
    7183 
    7184 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7185 {
    7186  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7187  suballocItem != m_Suballocations.end();
    7188  ++suballocItem)
    7189  {
    7190  VmaSuballocation& suballoc = *suballocItem;
    7191  if(suballoc.offset == offset)
    7192  {
    7193  FreeSuballocation(suballocItem);
    7194  return;
    7195  }
    7196  }
    7197  VMA_ASSERT(0 && "Not found!");
    7198 }
    7199 
    7200 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7201 {
    7202  VkDeviceSize lastSize = 0;
    7203  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7204  {
    7205  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7206 
    7207  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7208  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7209  VMA_VALIDATE(it->size >= lastSize);
    7210  lastSize = it->size;
    7211  }
    7212  return true;
    7213 }
    7214 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at the free (or lost-able) suballocation suballocItem.
// On success fills *pOffset with the final aligned offset and returns true.
// When canMakeOtherLost is true, it may also plan to make used allocations lost:
// *itemsToMakeLostCount / *pSumItemSize report how many / how much would be lost,
// and *pSumFreeSize reports the free bytes that would be consumed.
// Returns false if the request cannot be satisfied from this starting point.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Path 1: the starting item may be free OR a used allocation we can make lost.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // Only allocations marked CanBecomeLost and unused for at least
            // frameInUseCount frames may be sacrificed.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // Used item: only usable if it can be made lost.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Path 2: simple case — the request must fit entirely inside this one
        // free suballocation; no allocations may be made lost.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7488 
    7489 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7490 {
    7491  VMA_ASSERT(item != m_Suballocations.end());
    7492  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7493 
    7494  VmaSuballocationList::iterator nextItem = item;
    7495  ++nextItem;
    7496  VMA_ASSERT(nextItem != m_Suballocations.end());
    7497  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7498 
    7499  item->size += nextItem->size;
    7500  --m_FreeCount;
    7501  m_Suballocations.erase(nextItem);
    7502 }
    7503 
// Marks the given suballocation as free, merges it with adjacent free
// suballocations if any, updates totals, and registers the final free block in
// m_FreeSuballocationsBySize. Returns iterator to the resulting (possibly
// merged) free suballocation.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Order matters: a neighbor must be unregistered from the by-size vector
    // BEFORE merging, because the merge erases it / changes sizes.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem absorbs suballocItem (and possibly nextItem, merged above);
        // re-register it with its new, larger size.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7555 
    7556 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7557 {
    7558  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7559  VMA_ASSERT(item->size > 0);
    7560 
    7561  // You may want to enable this validation at the beginning or at the end of
    7562  // this function, depending on what do you want to check.
    7563  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7564 
    7565  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7566  {
    7567  if(m_FreeSuballocationsBySize.empty())
    7568  {
    7569  m_FreeSuballocationsBySize.push_back(item);
    7570  }
    7571  else
    7572  {
    7573  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7574  }
    7575  }
    7576 
    7577  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7578 }
    7579 
    7580 
// Removes a free suballocation from m_FreeSuballocationsBySize. Items smaller
// than VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER were never registered, so
// they are skipped. Asserts if a registered item cannot be found.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary search finds the first entry whose size is not less than
        // item->size; several entries may share that size, so scan linearly
        // among the equal-sized run for the exact iterator.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Leaving the equal-size run without a match means the item is absent.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7613 
    7615 // class VmaBlockMetadata_Linear
    7616 
// Constructs linear (ring-buffer / double-stack) block metadata in its empty
// state: no free bytes accounted yet (Init sets them), both suballocation
// vectors empty, vector 0 selected as "1st", and no null (freed) items counted.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7629 
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
    // Nothing to release explicitly: members clean up via their own destructors.
}
    7633 
// Initializes metadata for a block of the given size: the base class records
// the size, and the entire block starts out free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7639 
// Validates all internal invariants of the linear metadata: vector emptiness
// vs. m_2ndVectorMode, placement of null (freed) items, strictly increasing
// offsets with VMA_DEBUG_MARGIN gaps, per-item consistency with the owning
// VmaAllocation, null-item counters, and the m_SumFreeSize total.
// Returns true on success (VMA_VALIDATE returns false early on failure).
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lower part of the block,
    // so it is scanned first (front to back).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading null items of the 1st vector must all be free placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): 'i >= m_1stNullItemsBeginCount' is always true here since
        // the loop starts at that index, making this check vacuous — presumably
        // a leftover from an earlier loop shape; confirm upstream intent.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the top, so it is
    // scanned in reverse (back of vector = lowest offset among its items).
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7766 
    7767 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7768 {
    7769  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7770  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7771 }
    7772 
// Returns the size of the largest contiguous region currently available for a
// NEW allocation, depending on the mode of the 2nd vector.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        whould make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            // Not empty here (IsEmpty() returned false above), so there is at
            // least one non-null item past the leading null run.
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            // Larger of: gap before the first allocation, gap after the last one.
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // back() of 2nd is the lowest-offset item of the top-down stack.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
    7836 
    7837 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7838 {
    7839  const VkDeviceSize size = GetSize();
    7840  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7841  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7842  const size_t suballoc1stCount = suballocations1st.size();
    7843  const size_t suballoc2ndCount = suballocations2nd.size();
    7844 
    7845  outInfo.blockCount = 1;
    7846  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7847  outInfo.unusedRangeCount = 0;
    7848  outInfo.usedBytes = 0;
    7849  outInfo.allocationSizeMin = UINT64_MAX;
    7850  outInfo.allocationSizeMax = 0;
    7851  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7852  outInfo.unusedRangeSizeMax = 0;
    7853 
    7854  VkDeviceSize lastOffset = 0;
    7855 
    7856  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7857  {
    7858  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7859  size_t nextAlloc2ndIndex = 0;
    7860  while(lastOffset < freeSpace2ndTo1stEnd)
    7861  {
    7862  // Find next non-null allocation or move nextAllocIndex to the end.
    7863  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7864  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7865  {
    7866  ++nextAlloc2ndIndex;
    7867  }
    7868 
    7869  // Found non-null allocation.
    7870  if(nextAlloc2ndIndex < suballoc2ndCount)
    7871  {
    7872  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7873 
    7874  // 1. Process free space before this allocation.
    7875  if(lastOffset < suballoc.offset)
    7876  {
    7877  // There is free space from lastOffset to suballoc.offset.
    7878  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7879  ++outInfo.unusedRangeCount;
    7880  outInfo.unusedBytes += unusedRangeSize;
    7881  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7882  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7883  }
    7884 
    7885  // 2. Process this allocation.
    7886  // There is allocation with suballoc.offset, suballoc.size.
    7887  outInfo.usedBytes += suballoc.size;
    7888  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7889  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7890 
    7891  // 3. Prepare for next iteration.
    7892  lastOffset = suballoc.offset + suballoc.size;
    7893  ++nextAlloc2ndIndex;
    7894  }
    7895  // We are at the end.
    7896  else
    7897  {
    7898  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7899  if(lastOffset < freeSpace2ndTo1stEnd)
    7900  {
    7901  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7902  ++outInfo.unusedRangeCount;
    7903  outInfo.unusedBytes += unusedRangeSize;
    7904  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7905  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7906  }
    7907 
    7908  // End of loop.
    7909  lastOffset = freeSpace2ndTo1stEnd;
    7910  }
    7911  }
    7912  }
    7913 
    7914  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7915  const VkDeviceSize freeSpace1stTo2ndEnd =
    7916  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7917  while(lastOffset < freeSpace1stTo2ndEnd)
    7918  {
    7919  // Find next non-null allocation or move nextAllocIndex to the end.
    7920  while(nextAlloc1stIndex < suballoc1stCount &&
    7921  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7922  {
    7923  ++nextAlloc1stIndex;
    7924  }
    7925 
    7926  // Found non-null allocation.
    7927  if(nextAlloc1stIndex < suballoc1stCount)
    7928  {
    7929  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7930 
    7931  // 1. Process free space before this allocation.
    7932  if(lastOffset < suballoc.offset)
    7933  {
    7934  // There is free space from lastOffset to suballoc.offset.
    7935  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7936  ++outInfo.unusedRangeCount;
    7937  outInfo.unusedBytes += unusedRangeSize;
    7938  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7939  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7940  }
    7941 
    7942  // 2. Process this allocation.
    7943  // There is allocation with suballoc.offset, suballoc.size.
    7944  outInfo.usedBytes += suballoc.size;
    7945  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7946  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7947 
    7948  // 3. Prepare for next iteration.
    7949  lastOffset = suballoc.offset + suballoc.size;
    7950  ++nextAlloc1stIndex;
    7951  }
    7952  // We are at the end.
    7953  else
    7954  {
    7955  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7956  if(lastOffset < freeSpace1stTo2ndEnd)
    7957  {
    7958  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7959  ++outInfo.unusedRangeCount;
    7960  outInfo.unusedBytes += unusedRangeSize;
    7961  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7962  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7963  }
    7964 
    7965  // End of loop.
    7966  lastOffset = freeSpace1stTo2ndEnd;
    7967  }
    7968  }
    7969 
    7970  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7971  {
    7972  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7973  while(lastOffset < size)
    7974  {
    7975  // Find next non-null allocation or move nextAllocIndex to the end.
    7976  while(nextAlloc2ndIndex != SIZE_MAX &&
    7977  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7978  {
    7979  --nextAlloc2ndIndex;
    7980  }
    7981 
    7982  // Found non-null allocation.
    7983  if(nextAlloc2ndIndex != SIZE_MAX)
    7984  {
    7985  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7986 
    7987  // 1. Process free space before this allocation.
    7988  if(lastOffset < suballoc.offset)
    7989  {
    7990  // There is free space from lastOffset to suballoc.offset.
    7991  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7992  ++outInfo.unusedRangeCount;
    7993  outInfo.unusedBytes += unusedRangeSize;
    7994  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7995  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7996  }
    7997 
    7998  // 2. Process this allocation.
    7999  // There is allocation with suballoc.offset, suballoc.size.
    8000  outInfo.usedBytes += suballoc.size;
    8001  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8002  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8003 
    8004  // 3. Prepare for next iteration.
    8005  lastOffset = suballoc.offset + suballoc.size;
    8006  --nextAlloc2ndIndex;
    8007  }
    8008  // We are at the end.
    8009  else
    8010  {
    8011  // There is free space from lastOffset to size.
    8012  if(lastOffset < size)
    8013  {
    8014  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8015  ++outInfo.unusedRangeCount;
    8016  outInfo.unusedBytes += unusedRangeSize;
    8017  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8018  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8019  }
    8020 
    8021  // End of loop.
    8022  lastOffset = size;
    8023  }
    8024  }
    8025  }
    8026 
    8027  outInfo.unusedBytes = size - outInfo.usedBytes;
    8028 }
    8029 
    8030 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8031 {
    8032  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8033  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8034  const VkDeviceSize size = GetSize();
    8035  const size_t suballoc1stCount = suballocations1st.size();
    8036  const size_t suballoc2ndCount = suballocations2nd.size();
    8037 
    8038  inoutStats.size += size;
    8039 
    8040  VkDeviceSize lastOffset = 0;
    8041 
    8042  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8043  {
    8044  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8045  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8046  while(lastOffset < freeSpace2ndTo1stEnd)
    8047  {
    8048  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8049  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8050  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8051  {
    8052  ++nextAlloc2ndIndex;
    8053  }
    8054 
    8055  // Found non-null allocation.
    8056  if(nextAlloc2ndIndex < suballoc2ndCount)
    8057  {
    8058  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8059 
    8060  // 1. Process free space before this allocation.
    8061  if(lastOffset < suballoc.offset)
    8062  {
    8063  // There is free space from lastOffset to suballoc.offset.
    8064  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8065  inoutStats.unusedSize += unusedRangeSize;
    8066  ++inoutStats.unusedRangeCount;
    8067  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8068  }
    8069 
    8070  // 2. Process this allocation.
    8071  // There is allocation with suballoc.offset, suballoc.size.
    8072  ++inoutStats.allocationCount;
    8073 
    8074  // 3. Prepare for next iteration.
    8075  lastOffset = suballoc.offset + suballoc.size;
    8076  ++nextAlloc2ndIndex;
    8077  }
    8078  // We are at the end.
    8079  else
    8080  {
    8081  if(lastOffset < freeSpace2ndTo1stEnd)
    8082  {
    8083  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8084  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8085  inoutStats.unusedSize += unusedRangeSize;
    8086  ++inoutStats.unusedRangeCount;
    8087  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8088  }
    8089 
    8090  // End of loop.
    8091  lastOffset = freeSpace2ndTo1stEnd;
    8092  }
    8093  }
    8094  }
    8095 
    8096  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8097  const VkDeviceSize freeSpace1stTo2ndEnd =
    8098  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8099  while(lastOffset < freeSpace1stTo2ndEnd)
    8100  {
    8101  // Find next non-null allocation or move nextAllocIndex to the end.
    8102  while(nextAlloc1stIndex < suballoc1stCount &&
    8103  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8104  {
    8105  ++nextAlloc1stIndex;
    8106  }
    8107 
    8108  // Found non-null allocation.
    8109  if(nextAlloc1stIndex < suballoc1stCount)
    8110  {
    8111  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8112 
    8113  // 1. Process free space before this allocation.
    8114  if(lastOffset < suballoc.offset)
    8115  {
    8116  // There is free space from lastOffset to suballoc.offset.
    8117  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8118  inoutStats.unusedSize += unusedRangeSize;
    8119  ++inoutStats.unusedRangeCount;
    8120  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8121  }
    8122 
    8123  // 2. Process this allocation.
    8124  // There is allocation with suballoc.offset, suballoc.size.
    8125  ++inoutStats.allocationCount;
    8126 
    8127  // 3. Prepare for next iteration.
    8128  lastOffset = suballoc.offset + suballoc.size;
    8129  ++nextAlloc1stIndex;
    8130  }
    8131  // We are at the end.
    8132  else
    8133  {
    8134  if(lastOffset < freeSpace1stTo2ndEnd)
    8135  {
    8136  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8137  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8138  inoutStats.unusedSize += unusedRangeSize;
    8139  ++inoutStats.unusedRangeCount;
    8140  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8141  }
    8142 
    8143  // End of loop.
    8144  lastOffset = freeSpace1stTo2ndEnd;
    8145  }
    8146  }
    8147 
    8148  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8149  {
    8150  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8151  while(lastOffset < size)
    8152  {
    8153  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8154  while(nextAlloc2ndIndex != SIZE_MAX &&
    8155  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8156  {
    8157  --nextAlloc2ndIndex;
    8158  }
    8159 
    8160  // Found non-null allocation.
    8161  if(nextAlloc2ndIndex != SIZE_MAX)
    8162  {
    8163  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8164 
    8165  // 1. Process free space before this allocation.
    8166  if(lastOffset < suballoc.offset)
    8167  {
    8168  // There is free space from lastOffset to suballoc.offset.
    8169  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8170  inoutStats.unusedSize += unusedRangeSize;
    8171  ++inoutStats.unusedRangeCount;
    8172  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8173  }
    8174 
    8175  // 2. Process this allocation.
    8176  // There is allocation with suballoc.offset, suballoc.size.
    8177  ++inoutStats.allocationCount;
    8178 
    8179  // 3. Prepare for next iteration.
    8180  lastOffset = suballoc.offset + suballoc.size;
    8181  --nextAlloc2ndIndex;
    8182  }
    8183  // We are at the end.
    8184  else
    8185  {
    8186  if(lastOffset < size)
    8187  {
    8188  // There is free space from lastOffset to size.
    8189  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8190  inoutStats.unusedSize += unusedRangeSize;
    8191  ++inoutStats.unusedRangeCount;
    8192  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8193  }
    8194 
    8195  // End of loop.
    8196  lastOffset = size;
    8197  }
    8198  }
    8199  }
    8200 }
    8201 
    8202 #if VMA_STATS_STRING_ENABLED
// Emits this block's layout as JSON via `json`. Runs two structurally identical
// passes over the same three regions (ring-buffer 2nd vector, 1st vector,
// double-stack 2nd vector):
//  - FIRST PASS only counts allocations / unused ranges and sums used bytes,
//    because PrintDetailedMap_Begin() needs those totals up front.
//  - SECOND PASS re-walks the regions and writes one entry per allocation /
//    unused range.
// The two passes MUST visit exactly the same ranges, or the counts passed to
// PrintDetailedMap_Begin() will disagree with the entries actually written.
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    // End offset of the last processed range; everything below it is accounted for.
    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode the 2nd vector occupies [0, offset of first live 1st-vector item).
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Walk the 1st vector. In double-stack mode it ends where the upper stack begins.
    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): the bound here is `size` while the comment below and the
            // matching branch of the SECOND PASS use `freeSpace1stTo2ndEnd`. Harmless
            // today because the enclosing while-loop already guarantees
            // lastOffset < freeSpace1stTo2ndEnd <= size, so both conditions are true
            // whenever this branch runs — but `freeSpace1stTo2ndEnd` would be the
            // consistent choice. TODO confirm and align with the second pass.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Upper stack: iterate indices downward; SIZE_MAX marks "walked past index 0".
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    // Same traversal as above, but now each allocation / unused range is written out.
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Reuses nextAlloc1stIndex and freeSpace1stTo2ndEnd computed in the first pass.
    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
    8517 #endif // #if VMA_STATS_STRING_ENABLED
    8518 
    8519 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8520  uint32_t currentFrameIndex,
    8521  uint32_t frameInUseCount,
    8522  VkDeviceSize bufferImageGranularity,
    8523  VkDeviceSize allocSize,
    8524  VkDeviceSize allocAlignment,
    8525  bool upperAddress,
    8526  VmaSuballocationType allocType,
    8527  bool canMakeOtherLost,
    8528  uint32_t strategy,
    8529  VmaAllocationRequest* pAllocationRequest)
    8530 {
    8531  VMA_ASSERT(allocSize > 0);
    8532  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8533  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8534  VMA_HEAVY_ASSERT(Validate());
    8535 
    8536  const VkDeviceSize size = GetSize();
    8537  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8538  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8539 
    8540  if(upperAddress)
    8541  {
    8542  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8543  {
    8544  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8545  return false;
    8546  }
    8547 
    8548  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8549  if(allocSize > size)
    8550  {
    8551  return false;
    8552  }
    8553  VkDeviceSize resultBaseOffset = size - allocSize;
    8554  if(!suballocations2nd.empty())
    8555  {
    8556  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8557  resultBaseOffset = lastSuballoc.offset - allocSize;
    8558  if(allocSize > lastSuballoc.offset)
    8559  {
    8560  return false;
    8561  }
    8562  }
    8563 
    8564  // Start from offset equal to end of free space.
    8565  VkDeviceSize resultOffset = resultBaseOffset;
    8566 
    8567  // Apply VMA_DEBUG_MARGIN at the end.
    8568  if(VMA_DEBUG_MARGIN > 0)
    8569  {
    8570  if(resultOffset < VMA_DEBUG_MARGIN)
    8571  {
    8572  return false;
    8573  }
    8574  resultOffset -= VMA_DEBUG_MARGIN;
    8575  }
    8576 
    8577  // Apply alignment.
    8578  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8579 
    8580  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8581  // Make bigger alignment if necessary.
    8582  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8583  {
    8584  bool bufferImageGranularityConflict = false;
    8585  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8586  {
    8587  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8588  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8589  {
    8590  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8591  {
    8592  bufferImageGranularityConflict = true;
    8593  break;
    8594  }
    8595  }
    8596  else
    8597  // Already on previous page.
    8598  break;
    8599  }
    8600  if(bufferImageGranularityConflict)
    8601  {
    8602  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8603  }
    8604  }
    8605 
    8606  // There is enough free space.
    8607  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8608  suballocations1st.back().offset + suballocations1st.back().size :
    8609  0;
    8610  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8611  {
    8612  // Check previous suballocations for BufferImageGranularity conflicts.
    8613  // If conflict exists, allocation cannot be made here.
    8614  if(bufferImageGranularity > 1)
    8615  {
    8616  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8617  {
    8618  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8619  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8620  {
    8621  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8622  {
    8623  return false;
    8624  }
    8625  }
    8626  else
    8627  {
    8628  // Already on next page.
    8629  break;
    8630  }
    8631  }
    8632  }
    8633 
    8634  // All tests passed: Success.
    8635  pAllocationRequest->offset = resultOffset;
    8636  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8637  pAllocationRequest->sumItemSize = 0;
    8638  // pAllocationRequest->item unused.
    8639  pAllocationRequest->itemsToMakeLostCount = 0;
    8640  return true;
    8641  }
    8642  }
    8643  else // !upperAddress
    8644  {
    8645  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8646  {
    8647  // Try to allocate at the end of 1st vector.
    8648 
    8649  VkDeviceSize resultBaseOffset = 0;
    8650  if(!suballocations1st.empty())
    8651  {
    8652  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8653  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8654  }
    8655 
    8656  // Start from offset equal to beginning of free space.
    8657  VkDeviceSize resultOffset = resultBaseOffset;
    8658 
    8659  // Apply VMA_DEBUG_MARGIN at the beginning.
    8660  if(VMA_DEBUG_MARGIN > 0)
    8661  {
    8662  resultOffset += VMA_DEBUG_MARGIN;
    8663  }
    8664 
    8665  // Apply alignment.
    8666  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8667 
    8668  // Check previous suballocations for BufferImageGranularity conflicts.
    8669  // Make bigger alignment if necessary.
    8670  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8671  {
    8672  bool bufferImageGranularityConflict = false;
    8673  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8674  {
    8675  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8676  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8677  {
    8678  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8679  {
    8680  bufferImageGranularityConflict = true;
    8681  break;
    8682  }
    8683  }
    8684  else
    8685  // Already on previous page.
    8686  break;
    8687  }
    8688  if(bufferImageGranularityConflict)
    8689  {
    8690  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8691  }
    8692  }
    8693 
    8694  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8695  suballocations2nd.back().offset : size;
    8696 
    8697  // There is enough free space at the end after alignment.
    8698  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8699  {
    8700  // Check next suballocations for BufferImageGranularity conflicts.
    8701  // If conflict exists, allocation cannot be made here.
    8702  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8703  {
    8704  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8705  {
    8706  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8707  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8708  {
    8709  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8710  {
    8711  return false;
    8712  }
    8713  }
    8714  else
    8715  {
    8716  // Already on previous page.
    8717  break;
    8718  }
    8719  }
    8720  }
    8721 
    8722  // All tests passed: Success.
    8723  pAllocationRequest->offset = resultOffset;
    8724  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8725  pAllocationRequest->sumItemSize = 0;
    8726  // pAllocationRequest->item unused.
    8727  pAllocationRequest->itemsToMakeLostCount = 0;
    8728  return true;
    8729  }
    8730  }
    8731 
    8732  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8733  // beginning of 1st vector as the end of free space.
    8734  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8735  {
    8736  VMA_ASSERT(!suballocations1st.empty());
    8737 
    8738  VkDeviceSize resultBaseOffset = 0;
    8739  if(!suballocations2nd.empty())
    8740  {
    8741  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8742  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8743  }
    8744 
    8745  // Start from offset equal to beginning of free space.
    8746  VkDeviceSize resultOffset = resultBaseOffset;
    8747 
    8748  // Apply VMA_DEBUG_MARGIN at the beginning.
    8749  if(VMA_DEBUG_MARGIN > 0)
    8750  {
    8751  resultOffset += VMA_DEBUG_MARGIN;
    8752  }
    8753 
    8754  // Apply alignment.
    8755  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8756 
    8757  // Check previous suballocations for BufferImageGranularity conflicts.
    8758  // Make bigger alignment if necessary.
    8759  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8760  {
    8761  bool bufferImageGranularityConflict = false;
    8762  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8763  {
    8764  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8765  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8766  {
    8767  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8768  {
    8769  bufferImageGranularityConflict = true;
    8770  break;
    8771  }
    8772  }
    8773  else
    8774  // Already on previous page.
    8775  break;
    8776  }
    8777  if(bufferImageGranularityConflict)
    8778  {
    8779  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8780  }
    8781  }
    8782 
    8783  pAllocationRequest->itemsToMakeLostCount = 0;
    8784  pAllocationRequest->sumItemSize = 0;
    8785  size_t index1st = m_1stNullItemsBeginCount;
    8786 
    8787  if(canMakeOtherLost)
    8788  {
    8789  while(index1st < suballocations1st.size() &&
    8790  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8791  {
    8792  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8793  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8794  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8795  {
    8796  // No problem.
    8797  }
    8798  else
    8799  {
    8800  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8801  if(suballoc.hAllocation->CanBecomeLost() &&
    8802  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8803  {
    8804  ++pAllocationRequest->itemsToMakeLostCount;
    8805  pAllocationRequest->sumItemSize += suballoc.size;
    8806  }
    8807  else
    8808  {
    8809  return false;
    8810  }
    8811  }
    8812  ++index1st;
    8813  }
    8814 
    8815  // Check next suballocations for BufferImageGranularity conflicts.
    8816  // If conflict exists, we must mark more allocations lost or fail.
    8817  if(bufferImageGranularity > 1)
    8818  {
    8819  while(index1st < suballocations1st.size())
    8820  {
    8821  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8822  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    8823  {
    8824  if(suballoc.hAllocation != VK_NULL_HANDLE)
    8825  {
    8826  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    8827  if(suballoc.hAllocation->CanBecomeLost() &&
    8828  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8829  {
    8830  ++pAllocationRequest->itemsToMakeLostCount;
    8831  pAllocationRequest->sumItemSize += suballoc.size;
    8832  }
    8833  else
    8834  {
    8835  return false;
    8836  }
    8837  }
    8838  }
    8839  else
    8840  {
    8841  // Already on next page.
    8842  break;
    8843  }
    8844  ++index1st;
    8845  }
    8846  }
    8847  }
    8848 
    8849  // There is enough free space at the end after alignment.
    8850  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    8851  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    8852  {
    8853  // Check next suballocations for BufferImageGranularity conflicts.
    8854  // If conflict exists, allocation cannot be made here.
    8855  if(bufferImageGranularity > 1)
    8856  {
    8857  for(size_t nextSuballocIndex = index1st;
    8858  nextSuballocIndex < suballocations1st.size();
    8859  nextSuballocIndex++)
    8860  {
    8861  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    8862  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8863  {
    8864  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8865  {
    8866  return false;
    8867  }
    8868  }
    8869  else
    8870  {
    8871  // Already on next page.
    8872  break;
    8873  }
    8874  }
    8875  }
    8876 
    8877  // All tests passed: Success.
    8878  pAllocationRequest->offset = resultOffset;
    8879  pAllocationRequest->sumFreeSize =
    8880  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    8881  - resultBaseOffset
    8882  - pAllocationRequest->sumItemSize;
    8883  // pAllocationRequest->item unused.
    8884  return true;
    8885  }
    8886  }
    8887  }
    8888 
    8889  return false;
    8890 }
    8891 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount
// by CreateAllocationRequest(). Candidates are the non-free items at the
// beginning of the 1st vector (after the leading null items), matching how the
// request was computed for the ring-buffer wrap-around case.
// Returns false if any of them can no longer be made lost.
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Nothing to do -- the request did not require making any allocation lost.
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Items are only requested lost in ring-buffer (or empty-2nd-vector) mode.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // FREE items are skipped; they do not count towards itemsToMakeLostCount.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a null (free) one and account for its size.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                // Allocation could not be made lost (e.g. used too recently).
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    8936 
    8937 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8938 {
    8939  uint32_t lostAllocationCount = 0;
    8940 
    8941  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8942  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8943  {
    8944  VmaSuballocation& suballoc = suballocations1st[i];
    8945  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8946  suballoc.hAllocation->CanBecomeLost() &&
    8947  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8948  {
    8949  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8950  suballoc.hAllocation = VK_NULL_HANDLE;
    8951  ++m_1stNullItemsMiddleCount;
    8952  m_SumFreeSize += suballoc.size;
    8953  ++lostAllocationCount;
    8954  }
    8955  }
    8956 
    8957  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8958  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8959  {
    8960  VmaSuballocation& suballoc = suballocations2nd[i];
    8961  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8962  suballoc.hAllocation->CanBecomeLost() &&
    8963  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8964  {
    8965  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8966  suballoc.hAllocation = VK_NULL_HANDLE;
    8967  ++m_2ndNullItemsCount;
    8968  ++lostAllocationCount;
    8969  }
    8970  }
    8971 
    8972  if(lostAllocationCount)
    8973  {
    8974  CleanupAfterFree();
    8975  }
    8976 
    8977  return lostAllocationCount;
    8978 }
    8979 
    8980 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8981 {
    8982  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8983  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8984  {
    8985  const VmaSuballocation& suballoc = suballocations1st[i];
    8986  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8987  {
    8988  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    8989  {
    8990  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8991  return VK_ERROR_VALIDATION_FAILED_EXT;
    8992  }
    8993  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    8994  {
    8995  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8996  return VK_ERROR_VALIDATION_FAILED_EXT;
    8997  }
    8998  }
    8999  }
    9000 
    9001  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9002  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9003  {
    9004  const VmaSuballocation& suballoc = suballocations2nd[i];
    9005  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9006  {
    9007  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9008  {
    9009  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9010  return VK_ERROR_VALIDATION_FAILED_EXT;
    9011  }
    9012  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9013  {
    9014  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9015  return VK_ERROR_VALIDATION_FAILED_EXT;
    9016  }
    9017  }
    9018  }
    9019 
    9020  return VK_SUCCESS;
    9021 }
    9022 
// Commits a previously computed allocation request into the metadata.
// upperAddress == true appends to the 2nd vector used as an upper stack
// (double-stack mode); otherwise the suballocation goes to the end of the
// 1st vector, or wraps around into the 2nd vector in ring-buffer mode.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Double-stack mode is mutually exclusive with ring-buffer mode.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset matches neither the end of the 1st vector nor the
                // ring-buffer tail -- the request is inconsistent with state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9092 
    9093 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9094 {
    9095  FreeAtOffset(allocation->GetOffset());
    9096 }
    9097 
// Frees the suballocation that starts at the given offset. Cheap O(1) cases
// are tried first (first item of 1st vector, back of 2nd/1st vector), then
// binary search locates middle items in either vector. Middle items are only
// marked as null here; actual removal happens in CleanupAfterFree().
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        // Ring-buffer mode keeps 2nd-vector offsets ascending, double-stack
        // mode keeps them descending, hence the two different comparators.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9186 
    9187 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9188 {
    9189  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9190  const size_t suballocCount = AccessSuballocations1st().size();
    9191  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9192 }
    9193 
// Housekeeping executed after every free / make-lost operation:
// - if the block became completely empty, all state is reset;
// - otherwise null items are trimmed from the edges of both vectors,
//   the 1st vector is optionally compacted (see ShouldCompact1st()),
//   and when the 1st vector drains in ring-buffer mode, the 2nd vector is
//   promoted to become the new 1st one by flipping m_1stVectorIndex.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Whole block free: drop everything and return to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact 1st vector: move surviving (non-null) items to the
            // front preserving their order, then shrink the vector.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Skip the leading null items of the promoted vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flipping the index makes the previous 2nd vector the 1st.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9290 
    9291 
    9293 // class VmaBlockMetadata_Buddy
    9294 
// Constructs an empty buddy-allocator metadata object. The block size and the
// root node are set up later in Init(). m_FreeCount starts at 1 because after
// Init() the whole usable space is a single free node.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero-fill all per-level free-list heads/tails (all-zero bytes are
    // assumed to represent null pointers on supported platforms).
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9304 
    9305 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    9306 {
    9307  DeleteNode(m_Root);
    9308 }
    9309 
    9310 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9311 {
    9312  VmaBlockMetadata::Init(size);
    9313 
    9314  m_UsableSize = VmaPrevPow2(size);
    9315  m_SumFreeSize = m_UsableSize;
    9316 
    9317  // Calculate m_LevelCount.
    9318  m_LevelCount = 1;
    9319  while(m_LevelCount < MAX_LEVELS &&
    9320  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9321  {
    9322  ++m_LevelCount;
    9323  }
    9324 
    9325  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9326  rootNode->offset = 0;
    9327  rootNode->type = Node::TYPE_FREE;
    9328  rootNode->parent = VMA_NULL;
    9329  rootNode->buddy = VMA_NULL;
    9330 
    9331  m_Root = rootNode;
    9332  AddToFreeListFront(0, rootNode);
    9333 }
    9334 
// Validates internal consistency of the buddy metadata: the node tree (via
// ValidateNode), the aggregate counters against the recomputed ones, and the
// doubly-linked free list of every level. Returns false (through VMA_VALIDATE)
// on the first violation found.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // The head of a non-empty list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // The last node must be the registered tail of the list.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward and backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9377 
    9378 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9379 {
    9380  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9381  {
    9382  if(m_FreeList[level].front != VMA_NULL)
    9383  {
    9384  return LevelToNodeSize(level);
    9385  }
    9386  }
    9387  return 0;
    9388 }
    9389 
// Fills outInfo with statistics for this single block by walking the whole
// node tree. The tail beyond the usable power-of-2 size, if any, is reported
// as one extra unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Max/min start at identity values so the tree walk can fold into them.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9413 
    9414 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9415 {
    9416  const VkDeviceSize unusableSize = GetUnusableSize();
    9417 
    9418  inoutStats.size += GetSize();
    9419  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9420  inoutStats.allocationCount += m_AllocationCount;
    9421  inoutStats.unusedRangeCount += m_FreeCount;
    9422  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9423 
    9424  if(unusableSize > 0)
    9425  {
    9426  ++inoutStats.unusedRangeCount;
    9427  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9428  }
    9429 }
    9430 
#if VMA_STATS_STRING_ENABLED

// Writes a detailed JSON dump of this block: summary statistics first, then
// every node of the tree in offset order, then the unusable tail (if any).
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // Full statistics are computed only to obtain the three summary numbers
    // required by PrintDetailedMap_Begin.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    // Report the tail beyond the usable power-of-2 size as an unused range.
    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}

#endif // #if VMA_STATS_STRING_ENABLED
    9459 
// Tries to find a free node that can hold allocSize with allocAlignment.
// Buddy algorithm: free lists are searched from the level whose node size
// matches allocSize (targetLevel) up to the whole block (level 0); Alloc()
// later splits a larger node down to the target level. Lost allocations are
// not supported by this algorithm, so canMakeOtherLost and strategy are
// effectively ignored. Returns false when no suitable node exists.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    // Request exceeds the power-of-2 usable part of the block.
    if(allocSize > m_UsableSize)
    {
        return false;
    }

    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Iterates levels targetLevel, targetLevel-1, ..., 0: smallest fitting
    // node size first.
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            // The node's offset must satisfy the requested alignment.
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // The found level is passed to Alloc() through customData.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9510 
    9511 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9512  uint32_t currentFrameIndex,
    9513  uint32_t frameInUseCount,
    9514  VmaAllocationRequest* pAllocationRequest)
    9515 {
    9516  /*
    9517  Lost allocations are not supported in buddy allocator at the moment.
    9518  Support might be added in the future.
    9519  */
    9520  return pAllocationRequest->itemsToMakeLostCount == 0;
    9521 }
    9522 
    9523 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9524 {
    9525  /*
    9526  Lost allocations are not supported in buddy allocator at the moment.
    9527  Support might be added in the future.
    9528  */
    9529  return 0;
    9530 }
    9531 
// Commits an allocation request produced by CreateAllocationRequest().
// The free node found there (its level arrives through request.customData) is
// split repeatedly until a node of exactly targetLevel remains; that node is
// then converted into an allocation node holding hAllocation.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the requested node in the free list of its level by offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Right is pushed first so that left ends up at the list front and is
        // picked up by the next loop iteration below.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9606 
    9607 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9608 {
    9609  if(node->type == Node::TYPE_SPLIT)
    9610  {
    9611  DeleteNode(node->split.leftChild->buddy);
    9612  DeleteNode(node->split.leftChild);
    9613  }
    9614 
    9615  vma_delete(GetAllocationCallbacks(), node);
    9616 }
    9617 
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    // Recursively validates one node of the buddy tree and accumulates
    // free/allocation statistics into ctx for later cross-checking by the caller.
    // Returns false (via VMA_VALIDATE) on the first inconsistency found.
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Only the root (parent == null) has no buddy; buddies must point at each other.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // Slack between node size and allocation size counts as free space.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            // Left child occupies the first half of the parent's range.
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            // Right child starts exactly half a node after the left child.
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}
    9661 
    9662 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9663 {
    9664  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9665  uint32_t level = 0;
    9666  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9667  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9668  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9669  {
    9670  ++level;
    9671  currLevelNodeSize = nextLevelNodeSize;
    9672  nextLevelNodeSize = currLevelNodeSize >> 1;
    9673  }
    9674  return level;
    9675 }
    9676 
    9677 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    9678 {
    9679  // Find node and level.
    9680  Node* node = m_Root;
    9681  VkDeviceSize nodeOffset = 0;
    9682  uint32_t level = 0;
    9683  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    9684  while(node->type == Node::TYPE_SPLIT)
    9685  {
    9686  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    9687  if(offset < nodeOffset + nextLevelSize)
    9688  {
    9689  node = node->split.leftChild;
    9690  }
    9691  else
    9692  {
    9693  node = node->split.leftChild->buddy;
    9694  nodeOffset += nextLevelSize;
    9695  }
    9696  ++level;
    9697  levelNodeSize = nextLevelSize;
    9698  }
    9699 
    9700  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    9701  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    9702 
    9703  ++m_FreeCount;
    9704  --m_AllocationCount;
    9705  m_SumFreeSize += alloc->GetSize();
    9706 
    9707  node->type = Node::TYPE_FREE;
    9708 
    9709  // Join free nodes if possible.
    9710  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    9711  {
    9712  RemoveFromFreeList(level, node->buddy);
    9713  Node* const parent = node->parent;
    9714 
    9715  vma_delete(GetAllocationCallbacks(), node->buddy);
    9716  vma_delete(GetAllocationCallbacks(), node);
    9717  parent->type = Node::TYPE_FREE;
    9718 
    9719  node = parent;
    9720  --level;
    9721  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    9722  --m_FreeCount;
    9723  }
    9724 
    9725  AddToFreeListFront(level, node);
    9726 }
    9727 
    9728 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9729 {
    9730  switch(node->type)
    9731  {
    9732  case Node::TYPE_FREE:
    9733  ++outInfo.unusedRangeCount;
    9734  outInfo.unusedBytes += levelNodeSize;
    9735  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9736  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9737  break;
    9738  case Node::TYPE_ALLOCATION:
    9739  {
    9740  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9741  ++outInfo.allocationCount;
    9742  outInfo.usedBytes += allocSize;
    9743  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9744  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9745 
    9746  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9747  if(unusedRangeSize > 0)
    9748  {
    9749  ++outInfo.unusedRangeCount;
    9750  outInfo.unusedBytes += unusedRangeSize;
    9751  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9752  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9753  }
    9754  }
    9755  break;
    9756  case Node::TYPE_SPLIT:
    9757  {
    9758  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9759  const Node* const leftChild = node->split.leftChild;
    9760  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9761  const Node* const rightChild = leftChild->buddy;
    9762  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9763  }
    9764  break;
    9765  default:
    9766  VMA_ASSERT(0);
    9767  }
    9768 }
    9769 
    9770 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9771 {
    9772  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9773 
    9774  // List is empty.
    9775  Node* const frontNode = m_FreeList[level].front;
    9776  if(frontNode == VMA_NULL)
    9777  {
    9778  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9779  node->free.prev = node->free.next = VMA_NULL;
    9780  m_FreeList[level].front = m_FreeList[level].back = node;
    9781  }
    9782  else
    9783  {
    9784  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9785  node->free.prev = VMA_NULL;
    9786  node->free.next = frontNode;
    9787  frontNode->free.prev = node;
    9788  m_FreeList[level].front = node;
    9789  }
    9790 }
    9791 
    9792 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9793 {
    9794  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9795 
    9796  // It is at the front.
    9797  if(node->free.prev == VMA_NULL)
    9798  {
    9799  VMA_ASSERT(m_FreeList[level].front == node);
    9800  m_FreeList[level].front = node->free.next;
    9801  }
    9802  else
    9803  {
    9804  Node* const prevFreeNode = node->free.prev;
    9805  VMA_ASSERT(prevFreeNode->free.next == node);
    9806  prevFreeNode->free.next = node->free.next;
    9807  }
    9808 
    9809  // It is at the back.
    9810  if(node->free.next == VMA_NULL)
    9811  {
    9812  VMA_ASSERT(m_FreeList[level].back == node);
    9813  m_FreeList[level].back = node->free.prev;
    9814  }
    9815  else
    9816  {
    9817  Node* const nextFreeNode = node->free.next;
    9818  VMA_ASSERT(nextFreeNode->free.prev == node);
    9819  nextFreeNode->free.prev = node->free.prev;
    9820  }
    9821 }
    9822 
    9823 #if VMA_STATS_STRING_ENABLED
    9824 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    9825 {
    9826  switch(node->type)
    9827  {
    9828  case Node::TYPE_FREE:
    9829  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    9830  break;
    9831  case Node::TYPE_ALLOCATION:
    9832  {
    9833  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    9834  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9835  if(allocSize < levelNodeSize)
    9836  {
    9837  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    9838  }
    9839  }
    9840  break;
    9841  case Node::TYPE_SPLIT:
    9842  {
    9843  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9844  const Node* const leftChild = node->split.leftChild;
    9845  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    9846  const Node* const rightChild = leftChild->buddy;
    9847  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    9848  }
    9849  break;
    9850  default:
    9851  VMA_ASSERT(0);
    9852  }
    9853 }
    9854 #endif // #if VMA_STATS_STRING_ENABLED
    9855 
    9856 
    9858 // class VmaDeviceMemoryBlock
    9859 
// Constructs an empty, uninitialized block. No Vulkan memory is owned yet;
// Init() must be called before the block is used. The hAllocator parameter
// is not stored here - actual setup happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    9869 
    9870 void VmaDeviceMemoryBlock::Init(
    9871  VmaAllocator hAllocator,
    9872  uint32_t newMemoryTypeIndex,
    9873  VkDeviceMemory newMemory,
    9874  VkDeviceSize newSize,
    9875  uint32_t id,
    9876  uint32_t algorithm)
    9877 {
    9878  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9879 
    9880  m_MemoryTypeIndex = newMemoryTypeIndex;
    9881  m_Id = id;
    9882  m_hMemory = newMemory;
    9883 
    9884  switch(algorithm)
    9885  {
    9887  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9888  break;
    9890  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9891  break;
    9892  default:
    9893  VMA_ASSERT(0);
    9894  // Fall-through.
    9895  case 0:
    9896  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9897  }
    9898  m_pMetadata->Init(newSize);
    9899 }
    9900 
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    // Return the VkDeviceMemory to the allocator, then drop the metadata object.
    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    9914 
    9915 bool VmaDeviceMemoryBlock::Validate() const
    9916 {
    9917  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    9918  (m_pMetadata->GetSize() != 0));
    9919 
    9920  return m_pMetadata->Validate();
    9921 }
    9922 
    9923 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9924 {
    9925  void* pData = nullptr;
    9926  VkResult res = Map(hAllocator, 1, &pData);
    9927  if(res != VK_SUCCESS)
    9928  {
    9929  return res;
    9930  }
    9931 
    9932  res = m_pMetadata->CheckCorruption(pData);
    9933 
    9934  Unmap(hAllocator, 1);
    9935 
    9936  return res;
    9937 }
    9938 
    9939 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    9940 {
    9941  if(count == 0)
    9942  {
    9943  return VK_SUCCESS;
    9944  }
    9945 
    9946  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9947  if(m_MapCount != 0)
    9948  {
    9949  m_MapCount += count;
    9950  VMA_ASSERT(m_pMappedData != VMA_NULL);
    9951  if(ppData != VMA_NULL)
    9952  {
    9953  *ppData = m_pMappedData;
    9954  }
    9955  return VK_SUCCESS;
    9956  }
    9957  else
    9958  {
    9959  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    9960  hAllocator->m_hDevice,
    9961  m_hMemory,
    9962  0, // offset
    9963  VK_WHOLE_SIZE,
    9964  0, // flags
    9965  &m_pMappedData);
    9966  if(result == VK_SUCCESS)
    9967  {
    9968  if(ppData != VMA_NULL)
    9969  {
    9970  *ppData = m_pMappedData;
    9971  }
    9972  m_MapCount = count;
    9973  }
    9974  return result;
    9975  }
    9976 }
    9977 
    9978 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    9979 {
    9980  if(count == 0)
    9981  {
    9982  return;
    9983  }
    9984 
    9985  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9986  if(m_MapCount >= count)
    9987  {
    9988  m_MapCount -= count;
    9989  if(m_MapCount == 0)
    9990  {
    9991  m_pMappedData = VMA_NULL;
    9992  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    9993  }
    9994  }
    9995  else
    9996  {
    9997  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    9998  }
    9999 }
    10000 
    10001 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10002 {
    10003  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10004  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10005 
    10006  void* pData;
    10007  VkResult res = Map(hAllocator, 1, &pData);
    10008  if(res != VK_SUCCESS)
    10009  {
    10010  return res;
    10011  }
    10012 
    10013  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10014  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10015 
    10016  Unmap(hAllocator, 1);
    10017 
    10018  return VK_SUCCESS;
    10019 }
    10020 
    10021 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10022 {
    10023  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10024  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10025 
    10026  void* pData;
    10027  VkResult res = Map(hAllocator, 1, &pData);
    10028  if(res != VK_SUCCESS)
    10029  {
    10030  return res;
    10031  }
    10032 
    10033  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10034  {
    10035  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10036  }
    10037  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10038  {
    10039  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10040  }
    10041 
    10042  Unmap(hAllocator, 1);
    10043 
    10044  return VK_SUCCESS;
    10045 }
    10046 
    10047 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10048  const VmaAllocator hAllocator,
    10049  const VmaAllocation hAllocation,
    10050  VkBuffer hBuffer)
    10051 {
    10052  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10053  hAllocation->GetBlock() == this);
    10054  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10055  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10056  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10057  hAllocator->m_hDevice,
    10058  hBuffer,
    10059  m_hMemory,
    10060  hAllocation->GetOffset());
    10061 }
    10062 
    10063 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10064  const VmaAllocator hAllocator,
    10065  const VmaAllocation hAllocation,
    10066  VkImage hImage)
    10067 {
    10068  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10069  hAllocation->GetBlock() == this);
    10070  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10071  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10072  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10073  hAllocator->m_hDevice,
    10074  hImage,
    10075  m_hMemory,
    10076  hAllocation->GetOffset());
    10077 }
    10078 
    10079 static void InitStatInfo(VmaStatInfo& outInfo)
    10080 {
    10081  memset(&outInfo, 0, sizeof(outInfo));
    10082  outInfo.allocationSizeMin = UINT64_MAX;
    10083  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10084 }
    10085 
    10086 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10087 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10088 {
    10089  inoutInfo.blockCount += srcInfo.blockCount;
    10090  inoutInfo.allocationCount += srcInfo.allocationCount;
    10091  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10092  inoutInfo.usedBytes += srcInfo.usedBytes;
    10093  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10094  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10095  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10096  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10097  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10098 }
    10099 
    10100 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10101 {
    10102  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10103  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10104  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10105  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10106 }
    10107 
// Creates the pool's block vector from the user-provided create info.
// A blockSize of 0 means "use the allocator's preferred block size" and marks
// the block size as non-explicit (the explicitBlockSize argument below).
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10126 
// m_BlockVector is destroyed automatically by its own destructor;
// nothing else to release here.
VmaPool_T::~VmaPool_T()
{
}
    10130 
    10131 #if VMA_STATS_STRING_ENABLED
    10132 
    10133 #endif // #if VMA_STATS_STRING_ENABLED
    10134 
// Stores the configuration for this vector of memory blocks. No Vulkan
// resources are created here - blocks are created later via CreateMinBlocks()
// or on demand during allocation.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10162 
    10163 VmaBlockVector::~VmaBlockVector()
    10164 {
    10165  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10166 
    10167  for(size_t i = m_Blocks.size(); i--; )
    10168  {
    10169  m_Blocks[i]->Destroy(m_hAllocator);
    10170  vma_delete(m_hAllocator, m_Blocks[i]);
    10171  }
    10172 }
    10173 
    10174 VkResult VmaBlockVector::CreateMinBlocks()
    10175 {
    10176  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10177  {
    10178  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10179  if(res != VK_SUCCESS)
    10180  {
    10181  return res;
    10182  }
    10183  }
    10184  return VK_SUCCESS;
    10185 }
    10186 
    10187 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10188 {
    10189  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10190 
    10191  const size_t blockCount = m_Blocks.size();
    10192 
    10193  pStats->size = 0;
    10194  pStats->unusedSize = 0;
    10195  pStats->allocationCount = 0;
    10196  pStats->unusedRangeCount = 0;
    10197  pStats->unusedRangeSizeMax = 0;
    10198  pStats->blockCount = blockCount;
    10199 
    10200  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10201  {
    10202  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10203  VMA_ASSERT(pBlock);
    10204  VMA_HEAVY_ASSERT(pBlock->Validate());
    10205  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10206  }
    10207 }
    10208 
    10209 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10210 {
    10211  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10212  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10213  (VMA_DEBUG_MARGIN > 0) &&
    10214  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10215 }
    10216 
// Maximum number of retries of the "make other allocations lost" loop in
// VmaBlockVector::Allocate - each attempt may be invalidated by concurrent
// touches of the candidate allocations.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10218 
    10219 VkResult VmaBlockVector::Allocate(
    10220  VmaPool hCurrentPool,
    10221  uint32_t currentFrameIndex,
    10222  VkDeviceSize size,
    10223  VkDeviceSize alignment,
    10224  const VmaAllocationCreateInfo& createInfo,
    10225  VmaSuballocationType suballocType,
    10226  VmaAllocation* pAllocation)
    10227 {
    10228  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10229  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10230  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10231  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10232  const bool canCreateNewBlock =
    10233  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10234  (m_Blocks.size() < m_MaxBlockCount);
    10235  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10236 
    10237  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10238  // Which in turn is available only when maxBlockCount = 1.
    10239  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10240  {
    10241  canMakeOtherLost = false;
    10242  }
    10243 
    10244  // Upper address can only be used with linear allocator and within single memory block.
    10245  if(isUpperAddress &&
    10246  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10247  {
    10248  return VK_ERROR_FEATURE_NOT_PRESENT;
    10249  }
    10250 
    10251  // Validate strategy.
    10252  switch(strategy)
    10253  {
    10254  case 0:
    10256  break;
    10260  break;
    10261  default:
    10262  return VK_ERROR_FEATURE_NOT_PRESENT;
    10263  }
    10264 
    10265  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10266  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10267  {
    10268  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10269  }
    10270 
    10271  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10272 
    10273  /*
    10274  Under certain condition, this whole section can be skipped for optimization, so
    10275  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10276  e.g. for custom pools with linear algorithm.
    10277  */
    10278  if(!canMakeOtherLost || canCreateNewBlock)
    10279  {
    10280  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10281  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10283 
    10284  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10285  {
    10286  // Use only last block.
    10287  if(!m_Blocks.empty())
    10288  {
    10289  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10290  VMA_ASSERT(pCurrBlock);
    10291  VkResult res = AllocateFromBlock(
    10292  pCurrBlock,
    10293  hCurrentPool,
    10294  currentFrameIndex,
    10295  size,
    10296  alignment,
    10297  allocFlagsCopy,
    10298  createInfo.pUserData,
    10299  suballocType,
    10300  strategy,
    10301  pAllocation);
    10302  if(res == VK_SUCCESS)
    10303  {
    10304  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10305  return VK_SUCCESS;
    10306  }
    10307  }
    10308  }
    10309  else
    10310  {
    10312  {
    10313  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10314  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10315  {
    10316  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10317  VMA_ASSERT(pCurrBlock);
    10318  VkResult res = AllocateFromBlock(
    10319  pCurrBlock,
    10320  hCurrentPool,
    10321  currentFrameIndex,
    10322  size,
    10323  alignment,
    10324  allocFlagsCopy,
    10325  createInfo.pUserData,
    10326  suballocType,
    10327  strategy,
    10328  pAllocation);
    10329  if(res == VK_SUCCESS)
    10330  {
    10331  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10332  return VK_SUCCESS;
    10333  }
    10334  }
    10335  }
    10336  else // WORST_FIT, FIRST_FIT
    10337  {
    10338  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10339  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10340  {
    10341  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10342  VMA_ASSERT(pCurrBlock);
    10343  VkResult res = AllocateFromBlock(
    10344  pCurrBlock,
    10345  hCurrentPool,
    10346  currentFrameIndex,
    10347  size,
    10348  alignment,
    10349  allocFlagsCopy,
    10350  createInfo.pUserData,
    10351  suballocType,
    10352  strategy,
    10353  pAllocation);
    10354  if(res == VK_SUCCESS)
    10355  {
    10356  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10357  return VK_SUCCESS;
    10358  }
    10359  }
    10360  }
    10361  }
    10362 
    10363  // 2. Try to create new block.
    10364  if(canCreateNewBlock)
    10365  {
    10366  // Calculate optimal size for new block.
    10367  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10368  uint32_t newBlockSizeShift = 0;
    10369  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10370 
    10371  if(!m_ExplicitBlockSize)
    10372  {
    10373  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10374  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10375  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10376  {
    10377  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10378  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10379  {
    10380  newBlockSize = smallerNewBlockSize;
    10381  ++newBlockSizeShift;
    10382  }
    10383  else
    10384  {
    10385  break;
    10386  }
    10387  }
    10388  }
    10389 
    10390  size_t newBlockIndex = 0;
    10391  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10392  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10393  if(!m_ExplicitBlockSize)
    10394  {
    10395  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10396  {
    10397  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10398  if(smallerNewBlockSize >= size)
    10399  {
    10400  newBlockSize = smallerNewBlockSize;
    10401  ++newBlockSizeShift;
    10402  res = CreateBlock(newBlockSize, &newBlockIndex);
    10403  }
    10404  else
    10405  {
    10406  break;
    10407  }
    10408  }
    10409  }
    10410 
    10411  if(res == VK_SUCCESS)
    10412  {
    10413  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10414  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10415 
    10416  res = AllocateFromBlock(
    10417  pBlock,
    10418  hCurrentPool,
    10419  currentFrameIndex,
    10420  size,
    10421  alignment,
    10422  allocFlagsCopy,
    10423  createInfo.pUserData,
    10424  suballocType,
    10425  strategy,
    10426  pAllocation);
    10427  if(res == VK_SUCCESS)
    10428  {
    10429  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10430  return VK_SUCCESS;
    10431  }
    10432  else
    10433  {
    10434  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10435  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10436  }
    10437  }
    10438  }
    10439  }
    10440 
    10441  // 3. Try to allocate from existing blocks with making other allocations lost.
    10442  if(canMakeOtherLost)
    10443  {
    10444  uint32_t tryIndex = 0;
    10445  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10446  {
    10447  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10448  VmaAllocationRequest bestRequest = {};
    10449  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10450 
    10451  // 1. Search existing allocations.
    10453  {
    10454  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10455  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10456  {
    10457  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10458  VMA_ASSERT(pCurrBlock);
    10459  VmaAllocationRequest currRequest = {};
    10460  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10461  currentFrameIndex,
    10462  m_FrameInUseCount,
    10463  m_BufferImageGranularity,
    10464  size,
    10465  alignment,
    10466  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10467  suballocType,
    10468  canMakeOtherLost,
    10469  strategy,
    10470  &currRequest))
    10471  {
    10472  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10473  if(pBestRequestBlock == VMA_NULL ||
    10474  currRequestCost < bestRequestCost)
    10475  {
    10476  pBestRequestBlock = pCurrBlock;
    10477  bestRequest = currRequest;
    10478  bestRequestCost = currRequestCost;
    10479 
    10480  if(bestRequestCost == 0)
    10481  {
    10482  break;
    10483  }
    10484  }
    10485  }
    10486  }
    10487  }
    10488  else // WORST_FIT, FIRST_FIT
    10489  {
    10490  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10491  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10492  {
    10493  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10494  VMA_ASSERT(pCurrBlock);
    10495  VmaAllocationRequest currRequest = {};
    10496  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10497  currentFrameIndex,
    10498  m_FrameInUseCount,
    10499  m_BufferImageGranularity,
    10500  size,
    10501  alignment,
    10502  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10503  suballocType,
    10504  canMakeOtherLost,
    10505  strategy,
    10506  &currRequest))
    10507  {
    10508  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10509  if(pBestRequestBlock == VMA_NULL ||
    10510  currRequestCost < bestRequestCost ||
    10512  {
    10513  pBestRequestBlock = pCurrBlock;
    10514  bestRequest = currRequest;
    10515  bestRequestCost = currRequestCost;
    10516 
    10517  if(bestRequestCost == 0 ||
    10519  {
    10520  break;
    10521  }
    10522  }
    10523  }
    10524  }
    10525  }
    10526 
    10527  if(pBestRequestBlock != VMA_NULL)
    10528  {
    10529  if(mapped)
    10530  {
    10531  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10532  if(res != VK_SUCCESS)
    10533  {
    10534  return res;
    10535  }
    10536  }
    10537 
    10538  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10539  currentFrameIndex,
    10540  m_FrameInUseCount,
    10541  &bestRequest))
    10542  {
    10543  // We no longer have an empty Allocation.
    10544  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10545  {
    10546  m_HasEmptyBlock = false;
    10547  }
    10548  // Allocate from this pBlock.
    10549  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10550  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10551  (*pAllocation)->InitBlockAllocation(
    10552  hCurrentPool,
    10553  pBestRequestBlock,
    10554  bestRequest.offset,
    10555  alignment,
    10556  size,
    10557  suballocType,
    10558  mapped,
    10559  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10560  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10561  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10562  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10563  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10564  {
    10565  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10566  }
    10567  if(IsCorruptionDetectionEnabled())
    10568  {
    10569  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10570  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10571  }
    10572  return VK_SUCCESS;
    10573  }
    10574  // else: Some allocations must have been touched while we are here. Next try.
    10575  }
    10576  else
    10577  {
    10578  // Could not find place in any of the blocks - break outer loop.
    10579  break;
    10580  }
    10581  }
    10582  /* Maximum number of tries exceeded - a very unlike event when many other
    10583  threads are simultaneously touching allocations making it impossible to make
    10584  lost at the same time as we try to allocate. */
    10585  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10586  {
    10587  return VK_ERROR_TOO_MANY_OBJECTS;
    10588  }
    10589  }
    10590 
    10591  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10592 }
    10593 
// Returns a block-suballocated allocation to its owning block. If the block
// becomes the second empty one in this vector (and the minimum block count
// allows it), the redundant block's VkDeviceMemory is released as well.
void VmaBlockVector::Free(
    VmaAllocation hAllocation)
{
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Scope for lock.
    {
        VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        // With corruption detection enabled, verify the margin magic values
        // before the suballocation is released.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // Balance the map reference taken when this allocation was created
        // with VMA_ALLOCATION_CREATE_MAPPED_BIT.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        // NOTE(review): `memTypeIndex` is not declared in this scope; this
        // line only compiles when VMA_DEBUG_LOG expands to nothing - confirm.
        VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);

        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty Allocation. We don't want to have two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // We now have first empty block.
            else
            {
                m_HasEmptyBlock = true;
            }
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock)
        {
            // Blocks are kept sorted by free space, so an empty block - if any -
            // sits at the back of the vector.
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
                m_HasEmptyBlock = false;
            }
        }

        IncrementallySortBlocks();
    }

    // Destruction of a free Allocation. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG(" Deleted empty allocation");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
    10661 
    10662 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10663 {
    10664  VkDeviceSize result = 0;
    10665  for(size_t i = m_Blocks.size(); i--; )
    10666  {
    10667  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10668  if(result >= m_PreferredBlockSize)
    10669  {
    10670  break;
    10671  }
    10672  }
    10673  return result;
    10674 }
    10675 
    10676 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10677 {
    10678  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10679  {
    10680  if(m_Blocks[blockIndex] == pBlock)
    10681  {
    10682  VmaVectorRemove(m_Blocks, blockIndex);
    10683  return;
    10684  }
    10685  }
    10686  VMA_ASSERT(0);
    10687 }
    10688 
    10689 void VmaBlockVector::IncrementallySortBlocks()
    10690 {
    10691  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10692  {
    10693  // Bubble sort only until first swap.
    10694  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10695  {
    10696  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10697  {
    10698  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10699  return;
    10700  }
    10701  }
    10702  }
    10703 }
    10704 
// Tries to suballocate `size` bytes from the given block without making any
// other allocations lost. On success fills *pAllocation and returns
// VK_SUCCESS; returns VK_ERROR_OUT_OF_DEVICE_MEMORY if the block has no
// suitable free region.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Take a map reference up front so the allocation stays mapped for
        // its whole lifetime.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Debug feature: fill freshly created allocations with a bit pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Debug feature: write magic values into the margins around the
        // allocation so later corruption can be detected.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10779 
    10780 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10781 {
    10782  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10783  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10784  allocInfo.allocationSize = blockSize;
    10785  VkDeviceMemory mem = VK_NULL_HANDLE;
    10786  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10787  if(res < 0)
    10788  {
    10789  return res;
    10790  }
    10791 
    10792  // New VkDeviceMemory successfully created.
    10793 
    10794  // Create new Allocation for it.
    10795  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10796  pBlock->Init(
    10797  m_hAllocator,
    10798  m_MemoryTypeIndex,
    10799  mem,
    10800  allocInfo.allocationSize,
    10801  m_NextBlockId++,
    10802  m_Algorithm);
    10803 
    10804  m_Blocks.push_back(pBlock);
    10805  if(pNewBlockIndex != VMA_NULL)
    10806  {
    10807  *pNewBlockIndex = m_Blocks.size() - 1;
    10808  }
    10809 
    10810  return VK_SUCCESS;
    10811 }
    10812 
    10813 #if VMA_STATS_STRING_ENABLED
    10814 
// Serializes this block vector as a JSON object: pool/vector parameters
// first, then a "Blocks" object keyed by block id. The exact emission order
// defines the on-disk statistics format - do not reorder.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        // Custom pools report their full configuration.
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max entries are omitted when they hold their default values.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        // Default (non-custom-pool) vectors only report the preferred block size.
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10877 
    10878 #endif // #if VMA_STATS_STRING_ENABLED
    10879 
    10880 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10881  VmaAllocator hAllocator,
    10882  uint32_t currentFrameIndex)
    10883 {
    10884  if(m_pDefragmentator == VMA_NULL)
    10885  {
    10886  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10887  hAllocator,
    10888  this,
    10889  currentFrameIndex);
    10890  }
    10891 
    10892  return m_pDefragmentator;
    10893 }
    10894 
// Runs the defragmentator for this block vector (no-op if none was created),
// accumulates statistics into *pDefragmentationStats, decrements the
// remaining budget in the in-out limit parameters, and frees blocks that
// became empty during defragmentation.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Consume this vector's share of the caller's global budget so other
        // vectors see what remains.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    m_HasEmptyBlock = false;
    // Iterate backwards so removal by index does not shift unvisited entries.
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep at least m_MinBlockCount blocks; just remember
                // that an empty one exists.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    10951 
    10952 void VmaBlockVector::DestroyDefragmentator()
    10953 {
    10954  if(m_pDefragmentator != VMA_NULL)
    10955  {
    10956  vma_delete(m_hAllocator, m_pDefragmentator);
    10957  m_pDefragmentator = VMA_NULL;
    10958  }
    10959 }
    10960 
    10961 void VmaBlockVector::MakePoolAllocationsLost(
    10962  uint32_t currentFrameIndex,
    10963  size_t* pLostAllocationCount)
    10964 {
    10965  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10966  size_t lostAllocationCount = 0;
    10967  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10968  {
    10969  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10970  VMA_ASSERT(pBlock);
    10971  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10972  }
    10973  if(pLostAllocationCount != VMA_NULL)
    10974  {
    10975  *pLostAllocationCount = lostAllocationCount;
    10976  }
    10977 }
    10978 
    10979 VkResult VmaBlockVector::CheckCorruption()
    10980 {
    10981  if(!IsCorruptionDetectionEnabled())
    10982  {
    10983  return VK_ERROR_FEATURE_NOT_PRESENT;
    10984  }
    10985 
    10986  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10987  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10988  {
    10989  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10990  VMA_ASSERT(pBlock);
    10991  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    10992  if(res != VK_SUCCESS)
    10993  {
    10994  return res;
    10995  }
    10996  }
    10997  return VK_SUCCESS;
    10998 }
    10999 
    11000 void VmaBlockVector::AddStats(VmaStats* pStats)
    11001 {
    11002  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11003  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11004 
    11005  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11006 
    11007  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11008  {
    11009  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11010  VMA_ASSERT(pBlock);
    11011  VMA_HEAVY_ASSERT(pBlock->Validate());
    11012  VmaStatInfo allocationStatInfo;
    11013  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11014  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11015  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11016  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11017  }
    11018 }
    11019 
    11021 // VmaDefragmentator members definition
    11022 
// Constructs a defragmentator bound to one block vector. Move counters start
// at zero; the containers use the allocator's custom allocation callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation is only supported for the default algorithm
    // (GetAlgorithm() == 0), not for linear or other custom algorithms.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11037 
    11038 VmaDefragmentator::~VmaDefragmentator()
    11039 {
    11040  for(size_t i = m_Blocks.size(); i--; )
    11041  {
    11042  vma_delete(m_hAllocator, m_Blocks[i]);
    11043  }
    11044 }
    11045 
    11046 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11047 {
    11048  AllocationInfo allocInfo;
    11049  allocInfo.m_hAllocation = hAlloc;
    11050  allocInfo.m_pChanged = pChanged;
    11051  m_Allocations.push_back(allocInfo);
    11052 }
    11053 
    11054 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11055 {
    11056  // It has already been mapped for defragmentation.
    11057  if(m_pMappedDataForDefragmentation)
    11058  {
    11059  *ppMappedData = m_pMappedDataForDefragmentation;
    11060  return VK_SUCCESS;
    11061  }
    11062 
    11063  // It is originally mapped.
    11064  if(m_pBlock->GetMappedData())
    11065  {
    11066  *ppMappedData = m_pBlock->GetMappedData();
    11067  return VK_SUCCESS;
    11068  }
    11069 
    11070  // Map on first usage.
    11071  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11072  *ppMappedData = m_pMappedDataForDefragmentation;
    11073  return res;
    11074 }
    11075 
    11076 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11077 {
    11078  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11079  {
    11080  m_pBlock->Unmap(hAllocator, 1);
    11081  }
    11082 }
    11083 
// One round of defragmentation: walks allocations from the most "source"
// blocks (back of m_Blocks) toward the most "destination" blocks (front) and
// moves each allocation to the earliest place it fits. Returns VK_SUCCESS
// when the whole set has been scanned, or VK_INCOMPLETE when a move budget
// (bytes or allocation count) would be exceeded.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        // srcAllocIndex == SIZE_MAX acts as "begin at the last allocation of this block".
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            // NOTE(review): other CreateAllocationRequest call sites in this
            // file pass a `strategy` argument before the output pointer; it
            // appears to be absent here - possibly lost in this copy of the
            // file. Confirm against the upstream source.
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Map both blocks (reusing existing mappings when possible).
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Recreate the debug-margin magic values around the new location.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit the move in the metadata: allocate at the destination,
                // free at the source, and repoint the allocation handle.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Report the move through the caller-provided flag, if any.
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11227 
// Runs a full defragmentation pass over the registered allocations:
// builds per-block bookkeeping, buckets allocations into their blocks,
// sorts blocks from most "destination" to most "source", then executes
// up to two DefragmentRound() passes within the given budgets. Finally
// unmaps any blocks that were mapped solely for defragmentation.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            // Binary search over the pointer-sorted block list built above.
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block movability and order allocations largest-first.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11295 
    11296 bool VmaDefragmentator::MoveMakesSense(
    11297  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11298  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11299 {
    11300  if(dstBlockIndex < srcBlockIndex)
    11301  {
    11302  return true;
    11303  }
    11304  if(dstBlockIndex > srcBlockIndex)
    11305  {
    11306  return false;
    11307  }
    11308  if(dstOffset < srcOffset)
    11309  {
    11310  return true;
    11311  }
    11312  return false;
    11313 }
    11314 
    11316 // VmaRecorder
    11317 
    11318 #if VMA_RECORDING_ENABLED
    11319 
// Constructs the recorder in an inactive state: no file open, timer fields
// set to sentinel values. Init() must succeed before any Record* method
// is called.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11328 
    11329 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    11330 {
    11331  m_UseMutex = useMutex;
    11332  m_Flags = settings.flags;
    11333 
    11334  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    11335  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    11336 
    11337  // Open file for writing.
    11338  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    11339  if(err != 0)
    11340  {
    11341  return VK_ERROR_INITIALIZATION_FAILED;
    11342  }
    11343 
    11344  // Write header.
    11345  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    11346  fprintf(m_File, "%s\n", "1,3");
    11347 
    11348  return VK_SUCCESS;
    11349 }
    11350 
    11351 VmaRecorder::~VmaRecorder()
    11352 {
    11353  if(m_File != VMA_NULL)
    11354  {
    11355  fclose(m_File);
    11356  }
    11357 }
    11358 
// Logs a vmaCreateAllocator call: thread id, timestamp, frame index.
// The CSV line layout is part of the recording file format - do not change.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11368 
// Logs a vmaDestroyAllocator call: thread id, timestamp, frame index.
// The CSV line layout is part of the recording file format - do not change.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11378 
// Logs a vmaCreatePool call with the full pool configuration and the
// resulting pool handle. The CSV layout is part of the recording format.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        // size_t fields are widened to a fixed 64-bit type to match %llu.
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11395 
// Logs a vmaDestroyPool call with the pool handle being destroyed.
// The CSV line layout is part of the recording file format - do not change.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11406 
// Logs a vmaAllocateMemory call: memory requirements, creation parameters,
// the resulting allocation handle, and the (possibly copied) user-data
// string. The CSV layout is part of the recording format.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // userDataStr renders pUserData either as a string or a pointer,
    // depending on VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11431 
    11432 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11433  const VkMemoryRequirements& vkMemReq,
    11434  bool requiresDedicatedAllocation,
    11435  bool prefersDedicatedAllocation,
    11436  const VmaAllocationCreateInfo& createInfo,
    11437  VmaAllocation allocation)
    11438 {
    11439  CallParams callParams;
    11440  GetBasicParams(callParams);
    11441 
    11442  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11443  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11444  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11445  vkMemReq.size,
    11446  vkMemReq.alignment,
    11447  vkMemReq.memoryTypeBits,
    11448  requiresDedicatedAllocation ? 1 : 0,
    11449  prefersDedicatedAllocation ? 1 : 0,
    11450  createInfo.flags,
    11451  createInfo.usage,
    11452  createInfo.requiredFlags,
    11453  createInfo.preferredFlags,
    11454  createInfo.memoryTypeBits,
    11455  createInfo.pool,
    11456  allocation,
    11457  userDataStr.GetString());
    11458  Flush();
    11459 }
    11460 
    11461 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    11462  const VkMemoryRequirements& vkMemReq,
    11463  bool requiresDedicatedAllocation,
    11464  bool prefersDedicatedAllocation,
    11465  const VmaAllocationCreateInfo& createInfo,
    11466  VmaAllocation allocation)
    11467 {
    11468  CallParams callParams;
    11469  GetBasicParams(callParams);
    11470 
    11471  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11472  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11473  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11474  vkMemReq.size,
    11475  vkMemReq.alignment,
    11476  vkMemReq.memoryTypeBits,
    11477  requiresDedicatedAllocation ? 1 : 0,
    11478  prefersDedicatedAllocation ? 1 : 0,
    11479  createInfo.flags,
    11480  createInfo.usage,
    11481  createInfo.requiredFlags,
    11482  createInfo.preferredFlags,
    11483  createInfo.memoryTypeBits,
    11484  createInfo.pool,
    11485  allocation,
    11486  userDataStr.GetString());
    11487  Flush();
    11488 }
    11489 
    11490 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11491  VmaAllocation allocation)
    11492 {
    11493  CallParams callParams;
    11494  GetBasicParams(callParams);
    11495 
    11496  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11497  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11498  allocation);
    11499  Flush();
    11500 }
    11501 
    11502 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11503  VmaAllocation allocation,
    11504  const void* pUserData)
    11505 {
    11506  CallParams callParams;
    11507  GetBasicParams(callParams);
    11508 
    11509  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11510  UserDataString userDataStr(
    11511  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11512  pUserData);
    11513  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11514  allocation,
    11515  userDataStr.GetString());
    11516  Flush();
    11517 }
    11518 
    11519 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11520  VmaAllocation allocation)
    11521 {
    11522  CallParams callParams;
    11523  GetBasicParams(callParams);
    11524 
    11525  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11526  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11527  allocation);
    11528  Flush();
    11529 }
    11530 
    11531 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11532  VmaAllocation allocation)
    11533 {
    11534  CallParams callParams;
    11535  GetBasicParams(callParams);
    11536 
    11537  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11538  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11539  allocation);
    11540  Flush();
    11541 }
    11542 
    11543 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11544  VmaAllocation allocation)
    11545 {
    11546  CallParams callParams;
    11547  GetBasicParams(callParams);
    11548 
    11549  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11550  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11551  allocation);
    11552  Flush();
    11553 }
    11554 
    11555 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11556  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11557 {
    11558  CallParams callParams;
    11559  GetBasicParams(callParams);
    11560 
    11561  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11562  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11563  allocation,
    11564  offset,
    11565  size);
    11566  Flush();
    11567 }
    11568 
    11569 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11570  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11571 {
    11572  CallParams callParams;
    11573  GetBasicParams(callParams);
    11574 
    11575  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11576  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11577  allocation,
    11578  offset,
    11579  size);
    11580  Flush();
    11581 }
    11582 
    11583 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11584  const VkBufferCreateInfo& bufCreateInfo,
    11585  const VmaAllocationCreateInfo& allocCreateInfo,
    11586  VmaAllocation allocation)
    11587 {
    11588  CallParams callParams;
    11589  GetBasicParams(callParams);
    11590 
    11591  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11592  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11593  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11594  bufCreateInfo.flags,
    11595  bufCreateInfo.size,
    11596  bufCreateInfo.usage,
    11597  bufCreateInfo.sharingMode,
    11598  allocCreateInfo.flags,
    11599  allocCreateInfo.usage,
    11600  allocCreateInfo.requiredFlags,
    11601  allocCreateInfo.preferredFlags,
    11602  allocCreateInfo.memoryTypeBits,
    11603  allocCreateInfo.pool,
    11604  allocation,
    11605  userDataStr.GetString());
    11606  Flush();
    11607 }
    11608 
// Appends a vmaCreateImage entry to the recording file as one CSV line:
// every field of VkImageCreateInfo, followed by the allocation create info,
// the resulting allocation handle, and the serialized user data.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    // Thread id and timestamp identifying this call in the log.
    CallParams callParams;
    GetBasicParams(callParams);

    // Serialize the whole entry under the file mutex so concurrent calls
    // cannot interleave their CSV fields.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11643 
    11644 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11645  VmaAllocation allocation)
    11646 {
    11647  CallParams callParams;
    11648  GetBasicParams(callParams);
    11649 
    11650  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11651  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11652  allocation);
    11653  Flush();
    11654 }
    11655 
    11656 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11657  VmaAllocation allocation)
    11658 {
    11659  CallParams callParams;
    11660  GetBasicParams(callParams);
    11661 
    11662  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11663  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11664  allocation);
    11665  Flush();
    11666 }
    11667 
    11668 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11669  VmaAllocation allocation)
    11670 {
    11671  CallParams callParams;
    11672  GetBasicParams(callParams);
    11673 
    11674  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11675  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11676  allocation);
    11677  Flush();
    11678 }
    11679 
    11680 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11681  VmaAllocation allocation)
    11682 {
    11683  CallParams callParams;
    11684  GetBasicParams(callParams);
    11685 
    11686  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11687  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11688  allocation);
    11689  Flush();
    11690 }
    11691 
    11692 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11693  VmaPool pool)
    11694 {
    11695  CallParams callParams;
    11696  GetBasicParams(callParams);
    11697 
    11698  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11699  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11700  pool);
    11701  Flush();
    11702 }
    11703 
    11704 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11705 {
    11706  if(pUserData != VMA_NULL)
    11707  {
    11708  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11709  {
    11710  m_Str = (const char*)pUserData;
    11711  }
    11712  else
    11713  {
    11714  sprintf_s(m_PtrStr, "%p", pUserData);
    11715  m_Str = m_PtrStr;
    11716  }
    11717  }
    11718  else
    11719  {
    11720  m_Str = "";
    11721  }
    11722 }
    11723 
// Writes the "Config" section at the top of the recording file: physical
// device identity and limits, the full memory heap/type layout, enabled
// extensions, and the VMA_* compile-time macro values. Replay tools use this
// to detect environment differences between recording and playback.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity, useful for diagnosing vendor-specific differences.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that directly affect VMA's placement decisions.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Full heap and memory-type layout of the device.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11769 
    11770 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11771 {
    11772  outParams.threadId = GetCurrentThreadId();
    11773 
    11774  LARGE_INTEGER counter;
    11775  QueryPerformanceCounter(&counter);
    11776  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11777 }
    11778 
    11779 void VmaRecorder::Flush()
    11780 {
    11781  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11782  {
    11783  fflush(m_File);
    11784  }
    11785 }
    11786 
    11787 #endif // #if VMA_RECORDING_ENABLED
    11788 
    11790 // VmaAllocator_T
    11791 
    11792 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    11793  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    11794  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    11795  m_hDevice(pCreateInfo->device),
    11796  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    11797  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    11798  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    11799  m_PreferredLargeHeapBlockSize(0),
    11800  m_PhysicalDevice(pCreateInfo->physicalDevice),
    11801  m_CurrentFrameIndex(0),
    11802  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    11803  m_NextPoolId(0)
    11805  ,m_pRecorder(VMA_NULL)
    11806 #endif
    11807 {
    11808  if(VMA_DEBUG_DETECT_CORRUPTION)
    11809  {
    11810  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    11811  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    11812  }
    11813 
    11814  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    11815 
    11816 #if !(VMA_DEDICATED_ALLOCATION)
    11818  {
    11819  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    11820  }
    11821 #endif
    11822 
    11823  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    11824  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    11825  memset(&m_MemProps, 0, sizeof(m_MemProps));
    11826 
    11827  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    11828  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    11829 
    11830  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    11831  {
    11832  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    11833  }
    11834 
    11835  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    11836  {
    11837  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    11838  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    11839  }
    11840 
    11841  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    11842 
    11843  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    11844  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    11845 
    11846  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    11847  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    11848  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    11849  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    11850 
    11851  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    11852  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11853 
    11854  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    11855  {
    11856  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    11857  {
    11858  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    11859  if(limit != VK_WHOLE_SIZE)
    11860  {
    11861  m_HeapSizeLimit[heapIndex] = limit;
    11862  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    11863  {
    11864  m_MemProps.memoryHeaps[heapIndex].size = limit;
    11865  }
    11866  }
    11867  }
    11868  }
    11869 
    11870  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    11871  {
    11872  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    11873 
    11874  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    11875  this,
    11876  memTypeIndex,
    11877  preferredBlockSize,
    11878  0,
    11879  SIZE_MAX,
    11880  GetBufferImageGranularity(),
    11881  pCreateInfo->frameInUseCount,
    11882  false, // isCustomPool
    11883  false, // explicitBlockSize
    11884  false); // linearAlgorithm
    11885  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    11886  // becase minBlockCount is 0.
    11887  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    11888 
    11889  }
    11890 }
    11891 
    11892 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    11893 {
    11894  VkResult res = VK_SUCCESS;
    11895 
    11896  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    11897  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    11898  {
    11899 #if VMA_RECORDING_ENABLED
    11900  m_pRecorder = vma_new(this, VmaRecorder)();
    11901  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    11902  if(res != VK_SUCCESS)
    11903  {
    11904  return res;
    11905  }
    11906  m_pRecorder->WriteConfiguration(
    11907  m_PhysicalDeviceProperties,
    11908  m_MemProps,
    11909  m_UseKhrDedicatedAllocation);
    11910  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    11911 #else
    11912  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    11913  return VK_ERROR_FEATURE_NOT_PRESENT;
    11914 #endif
    11915  }
    11916 
    11917  return res;
    11918 }
    11919 
    11920 VmaAllocator_T::~VmaAllocator_T()
    11921 {
    11922 #if VMA_RECORDING_ENABLED
    11923  if(m_pRecorder != VMA_NULL)
    11924  {
    11925  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    11926  vma_delete(this, m_pRecorder);
    11927  }
    11928 #endif
    11929 
    11930  VMA_ASSERT(m_Pools.empty());
    11931 
    11932  for(size_t i = GetMemoryTypeCount(); i--; )
    11933  {
    11934  vma_delete(this, m_pDedicatedAllocations[i]);
    11935  vma_delete(this, m_pBlockVectors[i]);
    11936  }
    11937 }
    11938 
// Fills m_VulkanFunctions with usable entry points, in two passes:
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, take the statically linked
//    functions (plus device-proc-addr lookups for the KHR dedicated-allocation
//    entry points when that extension is in use).
// 2. Overwrite with any non-null pointers the user supplied in pVulkanFunctions.
// Finally asserts that every required function pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions are not statically exported; fetch them by name.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a user-supplied pointer only when it is non-null, so users may
// override any subset of the statically resolved functions.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12024 
    12025 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12026 {
    12027  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12028  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12029  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12030  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12031 }
    12032 
    12033 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12034  VkDeviceSize size,
    12035  VkDeviceSize alignment,
    12036  bool dedicatedAllocation,
    12037  VkBuffer dedicatedBuffer,
    12038  VkImage dedicatedImage,
    12039  const VmaAllocationCreateInfo& createInfo,
    12040  uint32_t memTypeIndex,
    12041  VmaSuballocationType suballocType,
    12042  VmaAllocation* pAllocation)
    12043 {
    12044  VMA_ASSERT(pAllocation != VMA_NULL);
    12045  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12046 
    12047  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12048 
    12049  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12050  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12051  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12052  {
    12053  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12054  }
    12055 
    12056  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12057  VMA_ASSERT(blockVector);
    12058 
    12059  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12060  bool preferDedicatedMemory =
    12061  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12062  dedicatedAllocation ||
    12063  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12064  size > preferredBlockSize / 2;
    12065 
    12066  if(preferDedicatedMemory &&
    12067  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12068  finalCreateInfo.pool == VK_NULL_HANDLE)
    12069  {
    12071  }
    12072 
    12073  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12074  {
    12075  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12076  {
    12077  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12078  }
    12079  else
    12080  {
    12081  return AllocateDedicatedMemory(
    12082  size,
    12083  suballocType,
    12084  memTypeIndex,
    12085  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12086  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12087  finalCreateInfo.pUserData,
    12088  dedicatedBuffer,
    12089  dedicatedImage,
    12090  pAllocation);
    12091  }
    12092  }
    12093  else
    12094  {
    12095  VkResult res = blockVector->Allocate(
    12096  VK_NULL_HANDLE, // hCurrentPool
    12097  m_CurrentFrameIndex.load(),
    12098  size,
    12099  alignment,
    12100  finalCreateInfo,
    12101  suballocType,
    12102  pAllocation);
    12103  if(res == VK_SUCCESS)
    12104  {
    12105  return res;
    12106  }
    12107 
    12108  // 5. Try dedicated memory.
    12109  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12110  {
    12111  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12112  }
    12113  else
    12114  {
    12115  res = AllocateDedicatedMemory(
    12116  size,
    12117  suballocType,
    12118  memTypeIndex,
    12119  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12120  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12121  finalCreateInfo.pUserData,
    12122  dedicatedBuffer,
    12123  dedicatedImage,
    12124  pAllocation);
    12125  if(res == VK_SUCCESS)
    12126  {
    12127  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12128  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12129  return VK_SUCCESS;
    12130  }
    12131  else
    12132  {
    12133  // Everything failed: Return error code.
    12134  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12135  return res;
    12136  }
    12137  }
    12138  }
    12139 }
    12140 
// Allocates a whole VkDeviceMemory block dedicated to a single resource:
// allocates the memory (optionally with VkMemoryDedicatedAllocateInfoKHR
// chained in), optionally maps it persistently, wraps it in a
// VmaAllocation_T, and registers it in m_pDedicatedAllocations[memTypeIndex].
// On map failure the freshly allocated memory is released before returning.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // When the KHR extension is in use, tell the driver which single buffer
    // or image this memory is dedicated to (at most one may be non-null).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Optionally establish a persistent mapping for the whole block.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the memory allocated above before bailing out.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12223 
    12224 void VmaAllocator_T::GetBufferMemoryRequirements(
    12225  VkBuffer hBuffer,
    12226  VkMemoryRequirements& memReq,
    12227  bool& requiresDedicatedAllocation,
    12228  bool& prefersDedicatedAllocation) const
    12229 {
    12230 #if VMA_DEDICATED_ALLOCATION
    12231  if(m_UseKhrDedicatedAllocation)
    12232  {
    12233  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12234  memReqInfo.buffer = hBuffer;
    12235 
    12236  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12237 
    12238  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12239  memReq2.pNext = &memDedicatedReq;
    12240 
    12241  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12242 
    12243  memReq = memReq2.memoryRequirements;
    12244  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12245  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12246  }
    12247  else
    12248 #endif // #if VMA_DEDICATED_ALLOCATION
    12249  {
    12250  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12251  requiresDedicatedAllocation = false;
    12252  prefersDedicatedAllocation = false;
    12253  }
    12254 }
    12255 
    12256 void VmaAllocator_T::GetImageMemoryRequirements(
    12257  VkImage hImage,
    12258  VkMemoryRequirements& memReq,
    12259  bool& requiresDedicatedAllocation,
    12260  bool& prefersDedicatedAllocation) const
    12261 {
    12262 #if VMA_DEDICATED_ALLOCATION
    12263  if(m_UseKhrDedicatedAllocation)
    12264  {
    12265  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12266  memReqInfo.image = hImage;
    12267 
    12268  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12269 
    12270  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12271  memReq2.pNext = &memDedicatedReq;
    12272 
    12273  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12274 
    12275  memReq = memReq2.memoryRequirements;
    12276  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12277  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12278  }
    12279  else
    12280 #endif // #if VMA_DEDICATED_ALLOCATION
    12281  {
    12282  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12283  requiresDedicatedAllocation = false;
    12284  prefersDedicatedAllocation = false;
    12285  }
    12286 }
    12287 
    12288 VkResult VmaAllocator_T::AllocateMemory(
    12289  const VkMemoryRequirements& vkMemReq,
    12290  bool requiresDedicatedAllocation,
    12291  bool prefersDedicatedAllocation,
    12292  VkBuffer dedicatedBuffer,
    12293  VkImage dedicatedImage,
    12294  const VmaAllocationCreateInfo& createInfo,
    12295  VmaSuballocationType suballocType,
    12296  VmaAllocation* pAllocation)
    12297 {
    12298  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12299 
    12300  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12301  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12302  {
    12303  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12304  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12305  }
    12306  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12308  {
    12309  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12310  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12311  }
    12312  if(requiresDedicatedAllocation)
    12313  {
    12314  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12315  {
    12316  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12317  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12318  }
    12319  if(createInfo.pool != VK_NULL_HANDLE)
    12320  {
    12321  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12322  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12323  }
    12324  }
    12325  if((createInfo.pool != VK_NULL_HANDLE) &&
    12326  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12327  {
    12328  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12329  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12330  }
    12331 
    12332  if(createInfo.pool != VK_NULL_HANDLE)
    12333  {
    12334  const VkDeviceSize alignmentForPool = VMA_MAX(
    12335  vkMemReq.alignment,
    12336  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12337  return createInfo.pool->m_BlockVector.Allocate(
    12338  createInfo.pool,
    12339  m_CurrentFrameIndex.load(),
    12340  vkMemReq.size,
    12341  alignmentForPool,
    12342  createInfo,
    12343  suballocType,
    12344  pAllocation);
    12345  }
    12346  else
    12347  {
    12348  // Bit mask of memory Vulkan types acceptable for this allocation.
    12349  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12350  uint32_t memTypeIndex = UINT32_MAX;
    12351  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12352  if(res == VK_SUCCESS)
    12353  {
    12354  VkDeviceSize alignmentForMemType = VMA_MAX(
    12355  vkMemReq.alignment,
    12356  GetMemoryTypeMinAlignment(memTypeIndex));
    12357 
    12358  res = AllocateMemoryOfType(
    12359  vkMemReq.size,
    12360  alignmentForMemType,
    12361  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12362  dedicatedBuffer,
    12363  dedicatedImage,
    12364  createInfo,
    12365  memTypeIndex,
    12366  suballocType,
    12367  pAllocation);
    12368  // Succeeded on first try.
    12369  if(res == VK_SUCCESS)
    12370  {
    12371  return res;
    12372  }
    12373  // Allocation from this memory type failed. Try other compatible memory types.
    12374  else
    12375  {
    12376  for(;;)
    12377  {
    12378  // Remove old memTypeIndex from list of possibilities.
    12379  memoryTypeBits &= ~(1u << memTypeIndex);
    12380  // Find alternative memTypeIndex.
    12381  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12382  if(res == VK_SUCCESS)
    12383  {
    12384  alignmentForMemType = VMA_MAX(
    12385  vkMemReq.alignment,
    12386  GetMemoryTypeMinAlignment(memTypeIndex));
    12387 
    12388  res = AllocateMemoryOfType(
    12389  vkMemReq.size,
    12390  alignmentForMemType,
    12391  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12392  dedicatedBuffer,
    12393  dedicatedImage,
    12394  createInfo,
    12395  memTypeIndex,
    12396  suballocType,
    12397  pAllocation);
    12398  // Allocation from this alternative memory type succeeded.
    12399  if(res == VK_SUCCESS)
    12400  {
    12401  return res;
    12402  }
    12403  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12404  }
    12405  // No other matching memory type index could be found.
    12406  else
    12407  {
    12408  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12409  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12410  }
    12411  }
    12412  }
    12413  }
    12414  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12415  else
    12416  return res;
    12417  }
    12418 }
    12419 
    12420 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12421 {
    12422  VMA_ASSERT(allocation);
    12423 
    12424  if(TouchAllocation(allocation))
    12425  {
    12426  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12427  {
    12428  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12429  }
    12430 
    12431  switch(allocation->GetType())
    12432  {
    12433  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12434  {
    12435  VmaBlockVector* pBlockVector = VMA_NULL;
    12436  VmaPool hPool = allocation->GetPool();
    12437  if(hPool != VK_NULL_HANDLE)
    12438  {
    12439  pBlockVector = &hPool->m_BlockVector;
    12440  }
    12441  else
    12442  {
    12443  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12444  pBlockVector = m_pBlockVectors[memTypeIndex];
    12445  }
    12446  pBlockVector->Free(allocation);
    12447  }
    12448  break;
    12449  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12450  FreeDedicatedMemory(allocation);
    12451  break;
    12452  default:
    12453  VMA_ASSERT(0);
    12454  }
    12455  }
    12456 
    12457  allocation->SetUserData(this, VMA_NULL);
    12458  vma_delete(this, allocation);
    12459 }
    12460 
    12461 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12462 {
    12463  // Initialize.
    12464  InitStatInfo(pStats->total);
    12465  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12466  InitStatInfo(pStats->memoryType[i]);
    12467  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12468  InitStatInfo(pStats->memoryHeap[i]);
    12469 
    12470  // Process default pools.
    12471  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12472  {
    12473  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12474  VMA_ASSERT(pBlockVector);
    12475  pBlockVector->AddStats(pStats);
    12476  }
    12477 
    12478  // Process custom pools.
    12479  {
    12480  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12481  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12482  {
    12483  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12484  }
    12485  }
    12486 
    12487  // Process dedicated allocations.
    12488  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12489  {
    12490  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12491  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12492  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12493  VMA_ASSERT(pDedicatedAllocVector);
    12494  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12495  {
    12496  VmaStatInfo allocationStatInfo;
    12497  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12498  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12499  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12500  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12501  }
    12502  }
    12503 
    12504  // Postprocess.
    12505  VmaPostprocessCalcStatInfo(pStats->total);
    12506  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12507  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12508  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12509  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12510 }
    12511 
// PCI vendor ID of AMD (4098 == 0x1002).
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12513 
/*
Defragments the given allocations, moving them within their block vectors to
reduce fragmentation.

pAllocations/allocationCount - allocations the caller permits to be moved.
pAllocationsChanged - optional array, parallel to pAllocations; set to
    VK_TRUE for each allocation that was actually moved.
pDefragmentationInfo - optional limits (max bytes / max allocations to move).
pDefragmentationStats - optional output statistics.

Only block (non-dedicated), non-lost allocations in HOST_VISIBLE +
HOST_COHERENT memory types are eligible; pools using a linear or buddy
algorithm are skipped.
*/
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    // Clear the optional outputs first so they are valid on every path.
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // Held for the entire operation so the set of custom pools cannot change.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                // Lazily create the block vector's defragmentator and hand
                // this allocation over to it.
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // Without explicit limits, allow moving everything.
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.
    // Runs unconditionally - even after a failure above - so no
    // defragmentator object outlives this call.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12634 
/*
Fills *pAllocationInfo with the current state of hAllocation.
For allocations that can become lost this also "touches" them: the last-use
frame index is brought up to the current frame via a lock-free CAS loop.
*/
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Loop until the allocation is observed as lost, or its last-use
        // frame index equals the current frame.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Lost allocation: no backing memory to report.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Alive and stamped with the current frame: report real data.
                // pMappedData stays VMA_NULL - allocations that can become
                // lost cannot be mapped (see Map()).
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to stamp with the current frame index, then re-check.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // When stats strings are enabled, keep the last-use frame index fresh
        // even for allocations that cannot become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12706 
/*
Stamps hAllocation as used in the current frame and reports whether it is
still alive. Returns false only when the allocation can become lost and has
already been lost.
*/
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // CAS loop: repeat until we observe the allocation lost, or its
        // last-use frame index equals the current frame.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation has been lost.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already stamped with the current frame - done.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to stamp with the current frame index, then re-check.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // When stats strings are enabled, keep the last-use frame index fresh
        // even for allocations that cannot become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    12758 
    12759 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    12760 {
    12761  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    12762 
    12763  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    12764 
    12765  if(newCreateInfo.maxBlockCount == 0)
    12766  {
    12767  newCreateInfo.maxBlockCount = SIZE_MAX;
    12768  }
    12769  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    12770  {
    12771  return VK_ERROR_INITIALIZATION_FAILED;
    12772  }
    12773 
    12774  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    12775 
    12776  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    12777 
    12778  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    12779  if(res != VK_SUCCESS)
    12780  {
    12781  vma_delete(this, *pPool);
    12782  *pPool = VMA_NULL;
    12783  return res;
    12784  }
    12785 
    12786  // Add to m_Pools.
    12787  {
    12788  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12789  (*pPool)->SetId(m_NextPoolId++);
    12790  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    12791  }
    12792 
    12793  return VK_SUCCESS;
    12794 }
    12795 
    12796 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12797 {
    12798  // Remove from m_Pools.
    12799  {
    12800  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12801  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    12802  VMA_ASSERT(success && "Pool not found in Allocator.");
    12803  }
    12804 
    12805  vma_delete(this, pool);
    12806 }
    12807 
// Fetches statistics of a custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    12812 
// Atomically publishes the new frame index used by the lost-allocation logic
// (see GetAllocationInfo / TouchAllocation).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    12817 
// Marks allocations in the given pool as lost, as of the current frame index.
// The optional pLostAllocationCount output is forwarded to the pool's block
// vector, which performs the actual work.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    12826 
// Checks a single custom pool for corruption by delegating to its block
// vector (may report VK_ERROR_FEATURE_NOT_PRESENT when the check is
// unavailable - see CheckCorruption() below).
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    12831 
    12832 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12833 {
    12834  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12835 
    12836  // Process default pools.
    12837  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12838  {
    12839  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12840  {
    12841  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12842  VMA_ASSERT(pBlockVector);
    12843  VkResult localRes = pBlockVector->CheckCorruption();
    12844  switch(localRes)
    12845  {
    12846  case VK_ERROR_FEATURE_NOT_PRESENT:
    12847  break;
    12848  case VK_SUCCESS:
    12849  finalRes = VK_SUCCESS;
    12850  break;
    12851  default:
    12852  return localRes;
    12853  }
    12854  }
    12855  }
    12856 
    12857  // Process custom pools.
    12858  {
    12859  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12860  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12861  {
    12862  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12863  {
    12864  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12865  switch(localRes)
    12866  {
    12867  case VK_ERROR_FEATURE_NOT_PRESENT:
    12868  break;
    12869  case VK_SUCCESS:
    12870  finalRes = VK_SUCCESS;
    12871  break;
    12872  default:
    12873  return localRes;
    12874  }
    12875  }
    12876  }
    12877  }
    12878 
    12879  return finalRes;
    12880 }
    12881 
// Creates an allocation object that is already in the "lost" state
// (frame index VMA_FRAME_INDEX_LOST), with no backing memory.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    12887 
    12888 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    12889 {
    12890  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    12891 
    12892  VkResult res;
    12893  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12894  {
    12895  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12896  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    12897  {
    12898  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12899  if(res == VK_SUCCESS)
    12900  {
    12901  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    12902  }
    12903  }
    12904  else
    12905  {
    12906  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12907  }
    12908  }
    12909  else
    12910  {
    12911  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12912  }
    12913 
    12914  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    12915  {
    12916  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    12917  }
    12918 
    12919  return res;
    12920 }
    12921 
    12922 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    12923 {
    12924  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    12925  {
    12926  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    12927  }
    12928 
    12929  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    12930 
    12931  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    12932  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12933  {
    12934  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12935  m_HeapSizeLimit[heapIndex] += size;
    12936  }
    12937 }
    12938 
/*
Maps the memory of hAllocation and returns a CPU pointer in *ppData.
Allocations that can become lost are not mappable and fail with
VK_ERROR_MEMORY_MAP_FAILED.
*/
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // Map the parent block (count of 1), then offset the returned
            // pointer by this allocation's offset within the block.
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                // Record the mapping on the allocation itself.
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
    12967 
// Ends a mapping previously established with Map().
void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
{
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // Unregister the mapping on the allocation, then unmap the parent
            // block (count of 1, mirroring Map()).
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            hAllocation->BlockAllocUnmap();
            pBlock->Unmap(this, 1);
        }
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        hAllocation->DedicatedAllocUnmap(this);
        break;
    default:
        VMA_ASSERT(0);
    }
}
    12986 
    12987 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    12988 {
    12989  VkResult res = VK_SUCCESS;
    12990  switch(hAllocation->GetType())
    12991  {
    12992  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12993  res = GetVulkanFunctions().vkBindBufferMemory(
    12994  m_hDevice,
    12995  hBuffer,
    12996  hAllocation->GetMemory(),
    12997  0); //memoryOffset
    12998  break;
    12999  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13000  {
    13001  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13002  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13003  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13004  break;
    13005  }
    13006  default:
    13007  VMA_ASSERT(0);
    13008  }
    13009  return res;
    13010 }
    13011 
    13012 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13013 {
    13014  VkResult res = VK_SUCCESS;
    13015  switch(hAllocation->GetType())
    13016  {
    13017  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13018  res = GetVulkanFunctions().vkBindImageMemory(
    13019  m_hDevice,
    13020  hImage,
    13021  hAllocation->GetMemory(),
    13022  0); //memoryOffset
    13023  break;
    13024  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13025  {
    13026  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13027  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13028  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13029  break;
    13030  }
    13031  default:
    13032  VMA_ASSERT(0);
    13033  }
    13034  return res;
    13035 }
    13036 
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    // Flushes or invalidates the host cache for [offset, offset+size) of the
    // allocation. Only needed for non-HOST_COHERENT memory types; for
    // coherent types (or size == 0) the call is deliberately a no-op.
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        // Vulkan requires mapped memory range offset/size to be multiples of
        // nonCoherentAtomSize, so the requested range is widened accordingly.
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align down the start, align up the end, clamped to the allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block: translate to a block-relative offset
            // and clamp so the aligned-up size never runs past the end of the
            // shared VkDeviceMemory.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            // Sub-allocations are placed on atom-size boundaries, so the
            // translated offset stays properly aligned.
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        // Dispatch the actual cache maintenance call.
        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13112 
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    // Frees a dedicated allocation: unregisters it from this memory type's
    // dedicated-allocations list, then releases its VkDeviceMemory.
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Lock scoped to the bookkeeping only — vkFreeMemory below happens
        // outside the mutex to keep the critical section short.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        // The vector is kept sorted by pointer, enabling binary-search removal.
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    13142 
    13143 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13144 {
    13145  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13146  !hAllocation->CanBecomeLost() &&
    13147  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13148  {
    13149  void* pData = VMA_NULL;
    13150  VkResult res = Map(hAllocation, &pData);
    13151  if(res == VK_SUCCESS)
    13152  {
    13153  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13154  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13155  Unmap(hAllocation);
    13156  }
    13157  else
    13158  {
    13159  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13160  }
    13161  }
    13162 }
    13163 
    13164 #if VMA_STATS_STRING_ENABLED
    13165 
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Emits three JSON sections into an already-open object:
    // "DedicatedAllocations", "DefaultPools" and "Pools" — each only if it
    // has any content.

    // Section 1: dedicated allocations, grouped by memory type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the section lazily so it is omitted entirely when empty.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Section 2: default (non-custom-pool) block vectors, one per memory type.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                // Open the section lazily so it is omitted entirely when empty.
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Keyed by the pool's numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13251 
    13252 #endif // #if VMA_STATS_STRING_ENABLED
    13253 
    13255 // Public interface
    13256 
    13257 VkResult vmaCreateAllocator(
    13258  const VmaAllocatorCreateInfo* pCreateInfo,
    13259  VmaAllocator* pAllocator)
    13260 {
    13261  VMA_ASSERT(pCreateInfo && pAllocator);
    13262  VMA_DEBUG_LOG("vmaCreateAllocator");
    13263  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13264  return (*pAllocator)->Init(pCreateInfo);
    13265 }
    13266 
    13267 void vmaDestroyAllocator(
    13268  VmaAllocator allocator)
    13269 {
    13270  if(allocator != VK_NULL_HANDLE)
    13271  {
    13272  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13273  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13274  vma_delete(&allocationCallbacks, allocator);
    13275  }
    13276 }
    13277 
    13279  VmaAllocator allocator,
    13280  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13281 {
    13282  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13283  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13284 }
    13285 
    13287  VmaAllocator allocator,
    13288  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13289 {
    13290  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13291  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13292 }
    13293 
    13295  VmaAllocator allocator,
    13296  uint32_t memoryTypeIndex,
    13297  VkMemoryPropertyFlags* pFlags)
    13298 {
    13299  VMA_ASSERT(allocator && pFlags);
    13300  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13301  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13302 }
    13303 
    13305  VmaAllocator allocator,
    13306  uint32_t frameIndex)
    13307 {
    13308  VMA_ASSERT(allocator);
    13309  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13310 
    13311  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13312 
    13313  allocator->SetCurrentFrameIndex(frameIndex);
    13314 }
    13315 
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    // Fills pStats with aggregate statistics (per memory type, per heap,
    // and total) gathered across the whole allocator.
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13324 
    13325 #if VMA_STATS_STRING_ENABLED
    13326 
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    // Builds a JSON document with total stats, per-heap and per-type info,
    // and (when detailedMap is VK_TRUE) a detailed map of all allocations.
    // The returned string must be released with vmaFreeStatsString.
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Inner scope: the JSON writer must be finished before the string
        // builder's contents are copied out below.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Emit stats only for heaps that actually hold any blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest each memory type under the heap it belongs to.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Spell out the property flags as readable strings.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built text into a caller-owned, null-terminated buffer.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13434 
    13435 void vmaFreeStatsString(
    13436  VmaAllocator allocator,
    13437  char* pStatsString)
    13438 {
    13439  if(pStatsString != VMA_NULL)
    13440  {
    13441  VMA_ASSERT(allocator);
    13442  size_t len = strlen(pStatsString);
    13443  vma_delete_array(allocator, pStatsString, len + 1);
    13444  }
    13445 }
    13446 
    13447 #endif // #if VMA_STATS_STRING_ENABLED
    13448 
    13449 /*
    13450 This function is not protected by any mutex because it just reads immutable data.
    13451 */
    13452 VkResult vmaFindMemoryTypeIndex(
    13453  VmaAllocator allocator,
    13454  uint32_t memoryTypeBits,
    13455  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13456  uint32_t* pMemoryTypeIndex)
    13457 {
    13458  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13459  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13460  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13461 
    13462  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13463  {
    13464  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13465  }
    13466 
    13467  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13468  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13469 
    13470  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13471  if(mapped)
    13472  {
    13473  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13474  }
    13475 
    13476  // Convert usage to requiredFlags and preferredFlags.
    13477  switch(pAllocationCreateInfo->usage)
    13478  {
    13480  break;
    13482  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13483  {
    13484  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13485  }
    13486  break;
    13488  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13489  break;
    13491  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13492  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13493  {
    13494  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13495  }
    13496  break;
    13498  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13499  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13500  break;
    13501  default:
    13502  break;
    13503  }
    13504 
    13505  *pMemoryTypeIndex = UINT32_MAX;
    13506  uint32_t minCost = UINT32_MAX;
    13507  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13508  memTypeIndex < allocator->GetMemoryTypeCount();
    13509  ++memTypeIndex, memTypeBit <<= 1)
    13510  {
    13511  // This memory type is acceptable according to memoryTypeBits bitmask.
    13512  if((memTypeBit & memoryTypeBits) != 0)
    13513  {
    13514  const VkMemoryPropertyFlags currFlags =
    13515  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13516  // This memory type contains requiredFlags.
    13517  if((requiredFlags & ~currFlags) == 0)
    13518  {
    13519  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13520  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13521  // Remember memory type with lowest cost.
    13522  if(currCost < minCost)
    13523  {
    13524  *pMemoryTypeIndex = memTypeIndex;
    13525  if(currCost == 0)
    13526  {
    13527  return VK_SUCCESS;
    13528  }
    13529  minCost = currCost;
    13530  }
    13531  }
    13532  }
    13533  }
    13534  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13535 }
    13536 
    13538  VmaAllocator allocator,
    13539  const VkBufferCreateInfo* pBufferCreateInfo,
    13540  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13541  uint32_t* pMemoryTypeIndex)
    13542 {
    13543  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13544  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13545  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13546  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13547 
    13548  const VkDevice hDev = allocator->m_hDevice;
    13549  VkBuffer hBuffer = VK_NULL_HANDLE;
    13550  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13551  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13552  if(res == VK_SUCCESS)
    13553  {
    13554  VkMemoryRequirements memReq = {};
    13555  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13556  hDev, hBuffer, &memReq);
    13557 
    13558  res = vmaFindMemoryTypeIndex(
    13559  allocator,
    13560  memReq.memoryTypeBits,
    13561  pAllocationCreateInfo,
    13562  pMemoryTypeIndex);
    13563 
    13564  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13565  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13566  }
    13567  return res;
    13568 }
    13569 
    13571  VmaAllocator allocator,
    13572  const VkImageCreateInfo* pImageCreateInfo,
    13573  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13574  uint32_t* pMemoryTypeIndex)
    13575 {
    13576  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13577  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13578  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13579  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13580 
    13581  const VkDevice hDev = allocator->m_hDevice;
    13582  VkImage hImage = VK_NULL_HANDLE;
    13583  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13584  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13585  if(res == VK_SUCCESS)
    13586  {
    13587  VkMemoryRequirements memReq = {};
    13588  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13589  hDev, hImage, &memReq);
    13590 
    13591  res = vmaFindMemoryTypeIndex(
    13592  allocator,
    13593  memReq.memoryTypeBits,
    13594  pAllocationCreateInfo,
    13595  pMemoryTypeIndex);
    13596 
    13597  allocator->GetVulkanFunctions().vkDestroyImage(
    13598  hDev, hImage, allocator->GetAllocationCallbacks());
    13599  }
    13600  return res;
    13601 }
    13602 
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    // Creates a custom memory pool.
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    // Recorded after the call so the resulting pool handle is available.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
    13625 
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    // Destroys a custom memory pool. Destroying a null pool is a no-op.
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Recorded before destruction, while the pool handle is still valid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
    13650 
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    // Fills pPoolStats with statistics for the given custom pool.
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
    13662 
    13664  VmaAllocator allocator,
    13665  VmaPool pool,
    13666  size_t* pLostAllocationCount)
    13667 {
    13668  VMA_ASSERT(allocator && pool);
    13669 
    13670  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13671 
    13672 #if VMA_RECORDING_ENABLED
    13673  if(allocator->GetRecorder() != VMA_NULL)
    13674  {
    13675  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13676  }
    13677 #endif
    13678 
    13679  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13680 }
    13681 
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    // Validates debug margins around allocations in the given pool; returns
    // the result of the allocator's corruption check for that pool.
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}
    13692 
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    // General-purpose allocation from caller-supplied memory requirements.
    // pAllocationInfo is optional and only filled on success.
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // No buffer/image is associated, so dedicated-allocation hints are off
    // and the suballocation type is unknown.
    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13734 
    13736  VmaAllocator allocator,
    13737  VkBuffer buffer,
    13738  const VmaAllocationCreateInfo* pCreateInfo,
    13739  VmaAllocation* pAllocation,
    13740  VmaAllocationInfo* pAllocationInfo)
    13741 {
    13742  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13743 
    13744  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13745 
    13746  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13747 
    13748  VkMemoryRequirements vkMemReq = {};
    13749  bool requiresDedicatedAllocation = false;
    13750  bool prefersDedicatedAllocation = false;
    13751  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    13752  requiresDedicatedAllocation,
    13753  prefersDedicatedAllocation);
    13754 
    13755  VkResult result = allocator->AllocateMemory(
    13756  vkMemReq,
    13757  requiresDedicatedAllocation,
    13758  prefersDedicatedAllocation,
    13759  buffer, // dedicatedBuffer
    13760  VK_NULL_HANDLE, // dedicatedImage
    13761  *pCreateInfo,
    13762  VMA_SUBALLOCATION_TYPE_BUFFER,
    13763  pAllocation);
    13764 
    13765 #if VMA_RECORDING_ENABLED
    13766  if(allocator->GetRecorder() != VMA_NULL)
    13767  {
    13768  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    13769  allocator->GetCurrentFrameIndex(),
    13770  vkMemReq,
    13771  requiresDedicatedAllocation,
    13772  prefersDedicatedAllocation,
    13773  *pCreateInfo,
    13774  *pAllocation);
    13775  }
    13776 #endif
    13777 
    13778  if(pAllocationInfo && result == VK_SUCCESS)
    13779  {
    13780  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13781  }
    13782 
    13783  return result;
    13784 }
    13785 
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    // Allocates memory suitable for the given existing image (without binding
    // it). pAllocationInfo is optional and only filled on success.
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Query requirements plus dedicated-allocation hints for this image.
    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation  = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13835 
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    // Frees an allocation created by any vmaAllocateMemory* function.
    // Freeing a null allocation is a valid no-op.
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Recorded before the free, while the allocation handle is still valid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->FreeMemory(allocation);
}
    13862 
    13864  VmaAllocator allocator,
    13865  VmaAllocation allocation,
    13866  VmaAllocationInfo* pAllocationInfo)
    13867 {
    13868  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    13869 
    13870  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13871 
    13872 #if VMA_RECORDING_ENABLED
    13873  if(allocator->GetRecorder() != VMA_NULL)
    13874  {
    13875  allocator->GetRecorder()->RecordGetAllocationInfo(
    13876  allocator->GetCurrentFrameIndex(),
    13877  allocation);
    13878  }
    13879 #endif
    13880 
    13881  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    13882 }
    13883 
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    // Touches the allocation so it is not considered for becoming lost in the
    // current frame; returns whether the allocation is still valid (not lost).
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordTouchAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return allocator->TouchAllocation(allocation);
}
    13903 
    13905  VmaAllocator allocator,
    13906  VmaAllocation allocation,
    13907  void* pUserData)
    13908 {
    13909  VMA_ASSERT(allocator && allocation);
    13910 
    13911  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13912 
    13913  allocation->SetUserData(allocator, pUserData);
    13914 
    13915 #if VMA_RECORDING_ENABLED
    13916  if(allocator->GetRecorder() != VMA_NULL)
    13917  {
    13918  allocator->GetRecorder()->RecordSetAllocationUserData(
    13919  allocator->GetCurrentFrameIndex(),
    13920  allocation,
    13921  pUserData);
    13922  }
    13923 #endif
    13924 }
    13925 
    13927  VmaAllocator allocator,
    13928  VmaAllocation* pAllocation)
    13929 {
    13930  VMA_ASSERT(allocator && pAllocation);
    13931 
    13932  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    13933 
    13934  allocator->CreateLostAllocation(pAllocation);
    13935 
    13936 #if VMA_RECORDING_ENABLED
    13937  if(allocator->GetRecorder() != VMA_NULL)
    13938  {
    13939  allocator->GetRecorder()->RecordCreateLostAllocation(
    13940  allocator->GetCurrentFrameIndex(),
    13941  *pAllocation);
    13942  }
    13943 #endif
    13944 }
    13945 
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    // Maps the allocation's memory and returns a pointer to its data in
    // *ppData. Mapping is reference-counted; pair with vmaUnmapMemory.
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->Map(allocation, ppData);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
    13968 
    13969 void vmaUnmapMemory(
    13970  VmaAllocator allocator,
    13971  VmaAllocation allocation)
    13972 {
    13973  VMA_ASSERT(allocator && allocation);
    13974 
    13975  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13976 
    13977 #if VMA_RECORDING_ENABLED
    13978  if(allocator->GetRecorder() != VMA_NULL)
    13979  {
    13980  allocator->GetRecorder()->RecordUnmapMemory(
    13981  allocator->GetCurrentFrameIndex(),
    13982  allocation);
    13983  }
    13984 #endif
    13985 
    13986  allocator->Unmap(allocation);
    13987 }
    13988 
    13989 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13990 {
    13991  VMA_ASSERT(allocator && allocation);
    13992 
    13993  VMA_DEBUG_LOG("vmaFlushAllocation");
    13994 
    13995  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13996 
    13997  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    13998 
    13999 #if VMA_RECORDING_ENABLED
    14000  if(allocator->GetRecorder() != VMA_NULL)
    14001  {
    14002  allocator->GetRecorder()->RecordFlushAllocation(
    14003  allocator->GetCurrentFrameIndex(),
    14004  allocation, offset, size);
    14005  }
    14006 #endif
    14007 }
    14008 
    14009 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14010 {
    14011  VMA_ASSERT(allocator && allocation);
    14012 
    14013  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14014 
    14015  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14016 
    14017  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14018 
    14019 #if VMA_RECORDING_ENABLED
    14020  if(allocator->GetRecorder() != VMA_NULL)
    14021  {
    14022  allocator->GetRecorder()->RecordInvalidateAllocation(
    14023  allocator->GetCurrentFrameIndex(),
    14024  allocation, offset, size);
    14025  }
    14026 #endif
    14027 }
    14028 
    14029 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14030 {
    14031  VMA_ASSERT(allocator);
    14032 
    14033  VMA_DEBUG_LOG("vmaCheckCorruption");
    14034 
    14035  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14036 
    14037  return allocator->CheckCorruption(memoryTypeBits);
    14038 }
    14039 
    14040 VkResult vmaDefragment(
    14041  VmaAllocator allocator,
    14042  VmaAllocation* pAllocations,
    14043  size_t allocationCount,
    14044  VkBool32* pAllocationsChanged,
    14045  const VmaDefragmentationInfo *pDefragmentationInfo,
    14046  VmaDefragmentationStats* pDefragmentationStats)
    14047 {
    14048  VMA_ASSERT(allocator && pAllocations);
    14049 
    14050  VMA_DEBUG_LOG("vmaDefragment");
    14051 
    14052  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14053 
    14054  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14055 }
    14056 
    14057 VkResult vmaBindBufferMemory(
    14058  VmaAllocator allocator,
    14059  VmaAllocation allocation,
    14060  VkBuffer buffer)
    14061 {
    14062  VMA_ASSERT(allocator && allocation && buffer);
    14063 
    14064  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14065 
    14066  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14067 
    14068  return allocator->BindBufferMemory(allocation, buffer);
    14069 }
    14070 
    14071 VkResult vmaBindImageMemory(
    14072  VmaAllocator allocator,
    14073  VmaAllocation allocation,
    14074  VkImage image)
    14075 {
    14076  VMA_ASSERT(allocator && allocation && image);
    14077 
    14078  VMA_DEBUG_LOG("vmaBindImageMemory");
    14079 
    14080  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14081 
    14082  return allocator->BindImageMemory(allocation, image);
    14083 }
    14084 
    14085 VkResult vmaCreateBuffer(
    14086  VmaAllocator allocator,
    14087  const VkBufferCreateInfo* pBufferCreateInfo,
    14088  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14089  VkBuffer* pBuffer,
    14090  VmaAllocation* pAllocation,
    14091  VmaAllocationInfo* pAllocationInfo)
    14092 {
    14093  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14094 
    14095  VMA_DEBUG_LOG("vmaCreateBuffer");
    14096 
    14097  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14098 
    14099  *pBuffer = VK_NULL_HANDLE;
    14100  *pAllocation = VK_NULL_HANDLE;
    14101 
    14102  // 1. Create VkBuffer.
    14103  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14104  allocator->m_hDevice,
    14105  pBufferCreateInfo,
    14106  allocator->GetAllocationCallbacks(),
    14107  pBuffer);
    14108  if(res >= 0)
    14109  {
    14110  // 2. vkGetBufferMemoryRequirements.
    14111  VkMemoryRequirements vkMemReq = {};
    14112  bool requiresDedicatedAllocation = false;
    14113  bool prefersDedicatedAllocation = false;
    14114  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14115  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14116 
    14117  // Make sure alignment requirements for specific buffer usages reported
    14118  // in Physical Device Properties are included in alignment reported by memory requirements.
    14119  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14120  {
    14121  VMA_ASSERT(vkMemReq.alignment %
    14122  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14123  }
    14124  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14125  {
    14126  VMA_ASSERT(vkMemReq.alignment %
    14127  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14128  }
    14129  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14130  {
    14131  VMA_ASSERT(vkMemReq.alignment %
    14132  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14133  }
    14134 
    14135  // 3. Allocate memory using allocator.
    14136  res = allocator->AllocateMemory(
    14137  vkMemReq,
    14138  requiresDedicatedAllocation,
    14139  prefersDedicatedAllocation,
    14140  *pBuffer, // dedicatedBuffer
    14141  VK_NULL_HANDLE, // dedicatedImage
    14142  *pAllocationCreateInfo,
    14143  VMA_SUBALLOCATION_TYPE_BUFFER,
    14144  pAllocation);
    14145 
    14146 #if VMA_RECORDING_ENABLED
    14147  if(allocator->GetRecorder() != VMA_NULL)
    14148  {
    14149  allocator->GetRecorder()->RecordCreateBuffer(
    14150  allocator->GetCurrentFrameIndex(),
    14151  *pBufferCreateInfo,
    14152  *pAllocationCreateInfo,
    14153  *pAllocation);
    14154  }
    14155 #endif
    14156 
    14157  if(res >= 0)
    14158  {
    14159  // 3. Bind buffer with memory.
    14160  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14161  if(res >= 0)
    14162  {
    14163  // All steps succeeded.
    14164  #if VMA_STATS_STRING_ENABLED
    14165  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14166  #endif
    14167  if(pAllocationInfo != VMA_NULL)
    14168  {
    14169  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14170  }
    14171 
    14172  return VK_SUCCESS;
    14173  }
    14174  allocator->FreeMemory(*pAllocation);
    14175  *pAllocation = VK_NULL_HANDLE;
    14176  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14177  *pBuffer = VK_NULL_HANDLE;
    14178  return res;
    14179  }
    14180  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14181  *pBuffer = VK_NULL_HANDLE;
    14182  return res;
    14183  }
    14184  return res;
    14185 }
    14186 
    14187 void vmaDestroyBuffer(
    14188  VmaAllocator allocator,
    14189  VkBuffer buffer,
    14190  VmaAllocation allocation)
    14191 {
    14192  VMA_ASSERT(allocator);
    14193 
    14194  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14195  {
    14196  return;
    14197  }
    14198 
    14199  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14200 
    14201  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14202 
    14203 #if VMA_RECORDING_ENABLED
    14204  if(allocator->GetRecorder() != VMA_NULL)
    14205  {
    14206  allocator->GetRecorder()->RecordDestroyBuffer(
    14207  allocator->GetCurrentFrameIndex(),
    14208  allocation);
    14209  }
    14210 #endif
    14211 
    14212  if(buffer != VK_NULL_HANDLE)
    14213  {
    14214  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14215  }
    14216 
    14217  if(allocation != VK_NULL_HANDLE)
    14218  {
    14219  allocator->FreeMemory(allocation);
    14220  }
    14221 }
    14222 
    14223 VkResult vmaCreateImage(
    14224  VmaAllocator allocator,
    14225  const VkImageCreateInfo* pImageCreateInfo,
    14226  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14227  VkImage* pImage,
    14228  VmaAllocation* pAllocation,
    14229  VmaAllocationInfo* pAllocationInfo)
    14230 {
    14231  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14232 
    14233  VMA_DEBUG_LOG("vmaCreateImage");
    14234 
    14235  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14236 
    14237  *pImage = VK_NULL_HANDLE;
    14238  *pAllocation = VK_NULL_HANDLE;
    14239 
    14240  // 1. Create VkImage.
    14241  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14242  allocator->m_hDevice,
    14243  pImageCreateInfo,
    14244  allocator->GetAllocationCallbacks(),
    14245  pImage);
    14246  if(res >= 0)
    14247  {
    14248  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14249  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14250  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14251 
    14252  // 2. Allocate memory using allocator.
    14253  VkMemoryRequirements vkMemReq = {};
    14254  bool requiresDedicatedAllocation = false;
    14255  bool prefersDedicatedAllocation = false;
    14256  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14257  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14258 
    14259  res = allocator->AllocateMemory(
    14260  vkMemReq,
    14261  requiresDedicatedAllocation,
    14262  prefersDedicatedAllocation,
    14263  VK_NULL_HANDLE, // dedicatedBuffer
    14264  *pImage, // dedicatedImage
    14265  *pAllocationCreateInfo,
    14266  suballocType,
    14267  pAllocation);
    14268 
    14269 #if VMA_RECORDING_ENABLED
    14270  if(allocator->GetRecorder() != VMA_NULL)
    14271  {
    14272  allocator->GetRecorder()->RecordCreateImage(
    14273  allocator->GetCurrentFrameIndex(),
    14274  *pImageCreateInfo,
    14275  *pAllocationCreateInfo,
    14276  *pAllocation);
    14277  }
    14278 #endif
    14279 
    14280  if(res >= 0)
    14281  {
    14282  // 3. Bind image with memory.
    14283  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14284  if(res >= 0)
    14285  {
    14286  // All steps succeeded.
    14287  #if VMA_STATS_STRING_ENABLED
    14288  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14289  #endif
    14290  if(pAllocationInfo != VMA_NULL)
    14291  {
    14292  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14293  }
    14294 
    14295  return VK_SUCCESS;
    14296  }
    14297  allocator->FreeMemory(*pAllocation);
    14298  *pAllocation = VK_NULL_HANDLE;
    14299  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14300  *pImage = VK_NULL_HANDLE;
    14301  return res;
    14302  }
    14303  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14304  *pImage = VK_NULL_HANDLE;
    14305  return res;
    14306  }
    14307  return res;
    14308 }
    14309 
    14310 void vmaDestroyImage(
    14311  VmaAllocator allocator,
    14312  VkImage image,
    14313  VmaAllocation allocation)
    14314 {
    14315  VMA_ASSERT(allocator);
    14316 
    14317  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14318  {
    14319  return;
    14320  }
    14321 
    14322  VMA_DEBUG_LOG("vmaDestroyImage");
    14323 
    14324  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14325 
    14326 #if VMA_RECORDING_ENABLED
    14327  if(allocator->GetRecorder() != VMA_NULL)
    14328  {
    14329  allocator->GetRecorder()->RecordDestroyImage(
    14330  allocator->GetCurrentFrameIndex(),
    14331  allocation);
    14332  }
    14333 #endif
    14334 
    14335  if(image != VK_NULL_HANDLE)
    14336  {
    14337  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14338  }
    14339  if(allocation != VK_NULL_HANDLE)
    14340  {
    14341  allocator->FreeMemory(allocation);
    14342  }
    14343 }
    14344 
    14345 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1571
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1872
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1624
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1628
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    -
    Definition: vk_mem_alloc.h:1598
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2190
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1579
    +
    Definition: vk_mem_alloc.h:1602
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2194
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1583
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:1825
    -
    Definition: vk_mem_alloc.h:1928
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1571
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2290
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1621
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2535
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2079
    -
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1468
    +
    Definition: vk_mem_alloc.h:1829
    +
    Definition: vk_mem_alloc.h:1932
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1575
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2294
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1625
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2539
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2083
    +
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1472
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2171
    -
    Definition: vk_mem_alloc.h:1905
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1560
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1978
    -
    Definition: vk_mem_alloc.h:1852
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1633
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2107
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2175
    +
    Definition: vk_mem_alloc.h:1909
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1564
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1982
    +
    Definition: vk_mem_alloc.h:1856
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1637
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2111
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1686
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1618
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1690
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1622
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1856
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1860
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1758
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1576
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1757
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2539
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1762
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1580
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1761
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2543
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1650
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1767
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2547
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1962
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2530
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1577
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1502
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1654
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1771
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2551
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1966
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2534
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1581
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1506
    Represents main object of this library initialized.
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1627
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1631
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2121
    -
    Definition: vk_mem_alloc.h:2115
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1693
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2300
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2125
    +
    Definition: vk_mem_alloc.h:2119
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1697
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2304
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1572
    -
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1596
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:1999
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2141
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2177
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1576
    +
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1600
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2003
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2145
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2181
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:1558
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2124
    +
    Definition: vk_mem_alloc.h:1562
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2128
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1803
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1807
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2525
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2529
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2543
    -
    Definition: vk_mem_alloc.h:1842
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:1986
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1575
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2547
    +
    Definition: vk_mem_alloc.h:1846
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:1990
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1579
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1763
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1508
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1767
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1512
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    - +
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1529
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1533
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1600
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1534
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2545
    +
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1604
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1538
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2549
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1973
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2187
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1977
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2191
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1568
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1746
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2136
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1521
    -
    Definition: vk_mem_alloc.h:2111
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1572
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1750
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2140
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1525
    +
    Definition: vk_mem_alloc.h:2115
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1912
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1759
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1525
    -
    Definition: vk_mem_alloc.h:1936
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2127
    -
    Definition: vk_mem_alloc.h:1851
    -
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1574
    +
    Definition: vk_mem_alloc.h:1916
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1763
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1529
    +
    Definition: vk_mem_alloc.h:1940
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2131
    +
    Definition: vk_mem_alloc.h:1855
    +
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1578
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1968
    -
    Definition: vk_mem_alloc.h:1959
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1972
    +
    Definition: vk_mem_alloc.h:1963
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1749
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1570
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2149
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1636
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2180
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1957
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:1992
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1753
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1574
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2153
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1640
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2184
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1961
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:1996
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1674
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1765
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1892
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1758
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1678
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1769
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1896
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1762
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1581
    -
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1606
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1523
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1580
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1585
    +
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1610
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1527
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1584
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2163
    -
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1573
    -
    Definition: vk_mem_alloc.h:1923
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2167
    +
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1577
    +
    Definition: vk_mem_alloc.h:1927
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    -
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1614
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2314
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1630
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1758
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1755
    +
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1618
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2318
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1634
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1762
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1759
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2168
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2172
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions...
    -
    Definition: vk_mem_alloc.h:1932
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2295
    -
    Definition: vk_mem_alloc.h:1943
    -
    Definition: vk_mem_alloc.h:1955
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2541
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1566
    +
    Definition: vk_mem_alloc.h:1936
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2299
    +
    Definition: vk_mem_alloc.h:1947
    +
    Definition: vk_mem_alloc.h:1959
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2545
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1570
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1753
    -
    Definition: vk_mem_alloc.h:1808
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2117
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1757
    +
    Definition: vk_mem_alloc.h:1812
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2121
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1603
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1751
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1578
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1582
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1879
    -
    Definition: vk_mem_alloc.h:1950
    -
    Definition: vk_mem_alloc.h:1835
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2309
    +
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1607
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1755
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1582
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1586
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1883
    +
    Definition: vk_mem_alloc.h:1954
    +
    Definition: vk_mem_alloc.h:1839
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2313
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1556
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1560
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1569
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2096
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2276
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1573
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2100
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2280
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1940
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2061
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1759
    +
    Definition: vk_mem_alloc.h:1944
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2065
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1763
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
    - -
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1590
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1766
    + +
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1594
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1770
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2174
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1759
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2178
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1763
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2281
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2285
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1464 /*
    1465 Define this macro to 0/1 to disable/enable support for recording functionality,
    1466 available through VmaAllocatorCreateInfo::pRecordSettings.
    1467 */
    1468 #ifndef VMA_RECORDING_ENABLED
    1469  #ifdef _WIN32
    1470  #define VMA_RECORDING_ENABLED 1
    1471  #else
    1472  #define VMA_RECORDING_ENABLED 0
    1473  #endif
    1474 #endif
    1475 
    1476 #ifndef NOMINMAX
    1477  #define NOMINMAX // For windows.h
    1478 #endif
    1479 
    1480 #include <vulkan/vulkan.h>
    1481 
    1482 #if VMA_RECORDING_ENABLED
    1483  #include <windows.h>
    1484 #endif
    1485 
    1486 #if !defined(VMA_DEDICATED_ALLOCATION)
    1487  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1488  #define VMA_DEDICATED_ALLOCATION 1
    1489  #else
    1490  #define VMA_DEDICATED_ALLOCATION 0
    1491  #endif
    1492 #endif
    1493 
    1503 VK_DEFINE_HANDLE(VmaAllocator)
    1504 
    1505 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1507  VmaAllocator allocator,
    1508  uint32_t memoryType,
    1509  VkDeviceMemory memory,
    1510  VkDeviceSize size);
    1512 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1513  VmaAllocator allocator,
    1514  uint32_t memoryType,
    1515  VkDeviceMemory memory,
    1516  VkDeviceSize size);
    1517 
    1531 
    1561 
    1564 typedef VkFlags VmaAllocatorCreateFlags;
    1565 
    1570 typedef struct VmaVulkanFunctions {
    1571  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1572  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1573  PFN_vkAllocateMemory vkAllocateMemory;
    1574  PFN_vkFreeMemory vkFreeMemory;
    1575  PFN_vkMapMemory vkMapMemory;
    1576  PFN_vkUnmapMemory vkUnmapMemory;
    1577  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1578  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1579  PFN_vkBindBufferMemory vkBindBufferMemory;
    1580  PFN_vkBindImageMemory vkBindImageMemory;
    1581  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1582  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1583  PFN_vkCreateBuffer vkCreateBuffer;
    1584  PFN_vkDestroyBuffer vkDestroyBuffer;
    1585  PFN_vkCreateImage vkCreateImage;
    1586  PFN_vkDestroyImage vkDestroyImage;
    1587 #if VMA_DEDICATED_ALLOCATION
    1588  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1589  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1590 #endif
    1592 
    1594 typedef enum VmaRecordFlagBits {
    1601 
    1604 typedef VkFlags VmaRecordFlags;
    1605 
    1607 typedef struct VmaRecordSettings
    1608 {
    1618  const char* pFilePath;
    1620 
    1623 {
    1627 
    1628  VkPhysicalDevice physicalDevice;
    1630 
    1631  VkDevice device;
    1633 
    1636 
    1637  const VkAllocationCallbacks* pAllocationCallbacks;
    1639 
    1678  const VkDeviceSize* pHeapSizeLimit;
    1699 
    1701 VkResult vmaCreateAllocator(
    1702  const VmaAllocatorCreateInfo* pCreateInfo,
    1703  VmaAllocator* pAllocator);
    1704 
    1706 void vmaDestroyAllocator(
    1707  VmaAllocator allocator);
    1708 
    1714  VmaAllocator allocator,
    1715  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1716 
    1722  VmaAllocator allocator,
    1723  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1724 
    1732  VmaAllocator allocator,
    1733  uint32_t memoryTypeIndex,
    1734  VkMemoryPropertyFlags* pFlags);
    1735 
    1745  VmaAllocator allocator,
    1746  uint32_t frameIndex);
    1747 
    1750 typedef struct VmaStatInfo
    1751 {
    1753  uint32_t blockCount;
    1759  VkDeviceSize usedBytes;
    1761  VkDeviceSize unusedBytes;
    1764 } VmaStatInfo;
    1765 
    1767 typedef struct VmaStats
    1768 {
    1769  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1770  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1772 } VmaStats;
    1773 
    1775 void vmaCalculateStats(
    1776  VmaAllocator allocator,
    1777  VmaStats* pStats);
    1778 
    1779 #define VMA_STATS_STRING_ENABLED 1
    1780 
    1781 #if VMA_STATS_STRING_ENABLED
    1782 
    1784 
    1786 void vmaBuildStatsString(
    1787  VmaAllocator allocator,
    1788  char** ppStatsString,
    1789  VkBool32 detailedMap);
    1790 
    1791 void vmaFreeStatsString(
    1792  VmaAllocator allocator,
    1793  char* pStatsString);
    1794 
    1795 #endif // #if VMA_STATS_STRING_ENABLED
    1796 
    1805 VK_DEFINE_HANDLE(VmaPool)
    1806 
    1807 typedef enum VmaMemoryUsage
    1808 {
    1857 } VmaMemoryUsage;
    1858 
    1873 
    1928 
    1941 
    1951 
    1958 
    1962 
    1964 {
    1977  VkMemoryPropertyFlags requiredFlags;
    1982  VkMemoryPropertyFlags preferredFlags;
    1990  uint32_t memoryTypeBits;
    2003  void* pUserData;
    2005 
    2022 VkResult vmaFindMemoryTypeIndex(
    2023  VmaAllocator allocator,
    2024  uint32_t memoryTypeBits,
    2025  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2026  uint32_t* pMemoryTypeIndex);
    2027 
    2041  VmaAllocator allocator,
    2042  const VkBufferCreateInfo* pBufferCreateInfo,
    2043  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2044  uint32_t* pMemoryTypeIndex);
    2045 
    2059  VmaAllocator allocator,
    2060  const VkImageCreateInfo* pImageCreateInfo,
    2061  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2062  uint32_t* pMemoryTypeIndex);
    2063 
    2084 
    2101 
    2112 
    2118 
    2121 typedef VkFlags VmaPoolCreateFlags;
    2122 
    2125 typedef struct VmaPoolCreateInfo {
    2140  VkDeviceSize blockSize;
    2169 
    2172 typedef struct VmaPoolStats {
    2175  VkDeviceSize size;
    2178  VkDeviceSize unusedSize;
    2191  VkDeviceSize unusedRangeSizeMax;
    2194  size_t blockCount;
    2195 } VmaPoolStats;
    2196 
    2203 VkResult vmaCreatePool(
    2204  VmaAllocator allocator,
    2205  const VmaPoolCreateInfo* pCreateInfo,
    2206  VmaPool* pPool);
    2207 
    2210 void vmaDestroyPool(
    2211  VmaAllocator allocator,
    2212  VmaPool pool);
    2213 
    2220 void vmaGetPoolStats(
    2221  VmaAllocator allocator,
    2222  VmaPool pool,
    2223  VmaPoolStats* pPoolStats);
    2224 
    2232  VmaAllocator allocator,
    2233  VmaPool pool,
    2234  size_t* pLostAllocationCount);
    2235 
    2250 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2251 
    2276 VK_DEFINE_HANDLE(VmaAllocation)
    2277 
    2278 
    2280 typedef struct VmaAllocationInfo {
    2285  uint32_t memoryType;
    2294  VkDeviceMemory deviceMemory;
    2299  VkDeviceSize offset;
    2304  VkDeviceSize size;
    2318  void* pUserData;
    2320 
    2331 VkResult vmaAllocateMemory(
    2332  VmaAllocator allocator,
    2333  const VkMemoryRequirements* pVkMemoryRequirements,
    2334  const VmaAllocationCreateInfo* pCreateInfo,
    2335  VmaAllocation* pAllocation,
    2336  VmaAllocationInfo* pAllocationInfo);
    2337 
    2345  VmaAllocator allocator,
    2346  VkBuffer buffer,
    2347  const VmaAllocationCreateInfo* pCreateInfo,
    2348  VmaAllocation* pAllocation,
    2349  VmaAllocationInfo* pAllocationInfo);
    2350 
    2352 VkResult vmaAllocateMemoryForImage(
    2353  VmaAllocator allocator,
    2354  VkImage image,
    2355  const VmaAllocationCreateInfo* pCreateInfo,
    2356  VmaAllocation* pAllocation,
    2357  VmaAllocationInfo* pAllocationInfo);
    2358 
    2360 void vmaFreeMemory(
    2361  VmaAllocator allocator,
    2362  VmaAllocation allocation);
    2363 
    2381  VmaAllocator allocator,
    2382  VmaAllocation allocation,
    2383  VmaAllocationInfo* pAllocationInfo);
    2384 
    2399 VkBool32 vmaTouchAllocation(
    2400  VmaAllocator allocator,
    2401  VmaAllocation allocation);
    2402 
    2417  VmaAllocator allocator,
    2418  VmaAllocation allocation,
    2419  void* pUserData);
    2420 
    2432  VmaAllocator allocator,
    2433  VmaAllocation* pAllocation);
    2434 
    2469 VkResult vmaMapMemory(
    2470  VmaAllocator allocator,
    2471  VmaAllocation allocation,
    2472  void** ppData);
    2473 
    2478 void vmaUnmapMemory(
    2479  VmaAllocator allocator,
    2480  VmaAllocation allocation);
    2481 
    2494 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2495 
    2508 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2509 
    2526 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2527 
    2529 typedef struct VmaDefragmentationInfo {
    2534  VkDeviceSize maxBytesToMove;
    2541 
    2543 typedef struct VmaDefragmentationStats {
    2545  VkDeviceSize bytesMoved;
    2547  VkDeviceSize bytesFreed;
    2553 
    2592 VkResult vmaDefragment(
    2593  VmaAllocator allocator,
    2594  VmaAllocation* pAllocations,
    2595  size_t allocationCount,
    2596  VkBool32* pAllocationsChanged,
    2597  const VmaDefragmentationInfo *pDefragmentationInfo,
    2598  VmaDefragmentationStats* pDefragmentationStats);
    2599 
    2612 VkResult vmaBindBufferMemory(
    2613  VmaAllocator allocator,
    2614  VmaAllocation allocation,
    2615  VkBuffer buffer);
    2616 
    2629 VkResult vmaBindImageMemory(
    2630  VmaAllocator allocator,
    2631  VmaAllocation allocation,
    2632  VkImage image);
    2633 
    2660 VkResult vmaCreateBuffer(
    2661  VmaAllocator allocator,
    2662  const VkBufferCreateInfo* pBufferCreateInfo,
    2663  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2664  VkBuffer* pBuffer,
    2665  VmaAllocation* pAllocation,
    2666  VmaAllocationInfo* pAllocationInfo);
    2667 
    2679 void vmaDestroyBuffer(
    2680  VmaAllocator allocator,
    2681  VkBuffer buffer,
    2682  VmaAllocation allocation);
    2683 
    2685 VkResult vmaCreateImage(
    2686  VmaAllocator allocator,
    2687  const VkImageCreateInfo* pImageCreateInfo,
    2688  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2689  VkImage* pImage,
    2690  VmaAllocation* pAllocation,
    2691  VmaAllocationInfo* pAllocationInfo);
    2692 
    2704 void vmaDestroyImage(
    2705  VmaAllocator allocator,
    2706  VkImage image,
    2707  VmaAllocation allocation);
    2708 
    2709 #ifdef __cplusplus
    2710 }
    2711 #endif
    2712 
    2713 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2714 
    2715 // For Visual Studio IntelliSense.
    2716 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2717 #define VMA_IMPLEMENTATION
    2718 #endif
    2719 
    2720 #ifdef VMA_IMPLEMENTATION
    2721 #undef VMA_IMPLEMENTATION
    2722 
    2723 #include <cstdint>
    2724 #include <cstdlib>
    2725 #include <cstring>
    2726 
    2727 /*******************************************************************************
    2728 CONFIGURATION SECTION
    2729 
    2730 Define some of these macros before each #include of this header or change them
    2731 here if you need other then default behavior depending on your environment.
    2732 */
    2733 
    2734 /*
    2735 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2736 internally, like:
    2737 
    2738  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2739 
    2740 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2741 VmaAllocatorCreateInfo::pVulkanFunctions.
    2742 */
    2743 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2744 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2745 #endif
    2746 
    2747 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2748 //#define VMA_USE_STL_CONTAINERS 1
    2749 
    2750 /* Set this macro to 1 to make the library including and using STL containers:
    2751 std::pair, std::vector, std::list, std::unordered_map.
    2752 
    2753 Set it to 0 or undefined to make the library using its own implementation of
    2754 the containers.
    2755 */
    2756 #if VMA_USE_STL_CONTAINERS
    2757  #define VMA_USE_STL_VECTOR 1
    2758  #define VMA_USE_STL_UNORDERED_MAP 1
    2759  #define VMA_USE_STL_LIST 1
    2760 #endif
    2761 
    2762 #if VMA_USE_STL_VECTOR
    2763  #include <vector>
    2764 #endif
    2765 
    2766 #if VMA_USE_STL_UNORDERED_MAP
    2767  #include <unordered_map>
    2768 #endif
    2769 
    2770 #if VMA_USE_STL_LIST
    2771  #include <list>
    2772 #endif
    2773 
    2774 /*
    2775 Following headers are used in this CONFIGURATION section only, so feel free to
    2776 remove them if not needed.
    2777 */
    2778 #include <cassert> // for assert
    2779 #include <algorithm> // for min, max
    2780 #include <mutex> // for std::mutex
    2781 #include <atomic> // for std::atomic
    2782 
    2783 #ifndef VMA_NULL
    2784  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2785  #define VMA_NULL nullptr
    2786 #endif
    2787 
    2788 #if defined(__APPLE__) || defined(__ANDROID__)
    2789 #include <cstdlib>
    2790 void *aligned_alloc(size_t alignment, size_t size)
    2791 {
    2792  // alignment must be >= sizeof(void*)
    2793  if(alignment < sizeof(void*))
    2794  {
    2795  alignment = sizeof(void*);
    2796  }
    2797 
    2798  void *pointer;
    2799  if(posix_memalign(&pointer, alignment, size) == 0)
    2800  return pointer;
    2801  return VMA_NULL;
    2802 }
    2803 #endif
    2804 
    2805 // If your compiler is not compatible with C++11 and definition of
    2806 // aligned_alloc() function is missing, uncommenting the following line may help:
    2807 
    2808 //#include <malloc.h>
    2809 
    2810 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2811 #ifndef VMA_ASSERT
    2812  #ifdef _DEBUG
    2813  #define VMA_ASSERT(expr) assert(expr)
    2814  #else
    2815  #define VMA_ASSERT(expr)
    2816  #endif
    2817 #endif
    2818 
    2819 // Assert that will be called very often, like inside data structures e.g. operator[].
    2820 // Making it non-empty can make program slow.
    2821 #ifndef VMA_HEAVY_ASSERT
    2822  #ifdef _DEBUG
    2823  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2824  #else
    2825  #define VMA_HEAVY_ASSERT(expr)
    2826  #endif
    2827 #endif
    2828 
    2829 #ifndef VMA_ALIGN_OF
    2830  #define VMA_ALIGN_OF(type) (__alignof(type))
    2831 #endif
    2832 
    2833 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2834  #if defined(_WIN32)
    2835  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2836  #else
    2837  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2838  #endif
    2839 #endif
    2840 
    2841 #ifndef VMA_SYSTEM_FREE
    2842  #if defined(_WIN32)
    2843  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2844  #else
    2845  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2846  #endif
    2847 #endif
    2848 
    2849 #ifndef VMA_MIN
    2850  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2851 #endif
    2852 
    2853 #ifndef VMA_MAX
    2854  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2855 #endif
    2856 
    2857 #ifndef VMA_SWAP
    2858  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2859 #endif
    2860 
    2861 #ifndef VMA_SORT
    2862  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2863 #endif
    2864 
    2865 #ifndef VMA_DEBUG_LOG
    2866  #define VMA_DEBUG_LOG(format, ...)
    2867  /*
    2868  #define VMA_DEBUG_LOG(format, ...) do { \
    2869  printf(format, __VA_ARGS__); \
    2870  printf("\n"); \
    2871  } while(false)
    2872  */
    2873 #endif
    2874 
    2875 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2876 #if VMA_STATS_STRING_ENABLED
    2877  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2878  {
    2879  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2880  }
    2881  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2882  {
    2883  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2884  }
    2885  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2886  {
    2887  snprintf(outStr, strLen, "%p", ptr);
    2888  }
    2889 #endif
    2890 
    2891 #ifndef VMA_MUTEX
    2892  class VmaMutex
    2893  {
    2894  public:
    2895  VmaMutex() { }
    2896  ~VmaMutex() { }
    2897  void Lock() { m_Mutex.lock(); }
    2898  void Unlock() { m_Mutex.unlock(); }
    2899  private:
    2900  std::mutex m_Mutex;
    2901  };
    2902  #define VMA_MUTEX VmaMutex
    2903 #endif
    2904 
    2905 /*
    2906 If providing your own implementation, you need to implement a subset of std::atomic:
    2907 
    2908 - Constructor(uint32_t desired)
    2909 - uint32_t load() const
    2910 - void store(uint32_t desired)
    2911 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2912 */
    2913 #ifndef VMA_ATOMIC_UINT32
    2914  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2915 #endif
    2916 
    2917 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2918 
    2922  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2923 #endif
    2924 
    2925 #ifndef VMA_DEBUG_ALIGNMENT
    2926 
    2930  #define VMA_DEBUG_ALIGNMENT (1)
    2931 #endif
    2932 
    2933 #ifndef VMA_DEBUG_MARGIN
    2934 
    2938  #define VMA_DEBUG_MARGIN (0)
    2939 #endif
    2940 
    2941 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2942 
    2946  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2947 #endif
    2948 
    2949 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2950 
    2955  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2956 #endif
    2957 
    2958 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2959 
    2963  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2964 #endif
    2965 
    2966 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2967 
    2971  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2972 #endif
    2973 
    2974 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2975  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2977 #endif
    2978 
    2979 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2980  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2982 #endif
    2983 
    2984 #ifndef VMA_CLASS_NO_COPY
    2985  #define VMA_CLASS_NO_COPY(className) \
    2986  private: \
    2987  className(const className&) = delete; \
    2988  className& operator=(const className&) = delete;
    2989 #endif
    2990 
// Sentinel frame index value marking an allocation as lost.
static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Magic pattern written into debug margins by VmaWriteMagicValue and checked
// by VmaValidateMagicValue to detect out-of-bounds writes.
// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

// Byte patterns for filling allocation memory on creation/destruction.
// NOTE(review): presumably applied when VMA_DEBUG_INITIALIZE_ALLOCATIONS is
// enabled - confirm at the usage sites.
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    2998 
    2999 /*******************************************************************************
    3000 END OF CONFIGURATION
    3001 */
    3002 
// An all-null VkAllocationCallbacks instance (no user callbacks set).
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3005 
    3006 // Returns number of bits set to 1 in (v).
    3007 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3008 {
    3009  uint32_t c = v - ((v >> 1) & 0x55555555);
    3010  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3011  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3012  c = ((c >> 8) + c) & 0x00FF00FF;
    3013  c = ((c >> 16) + c) & 0x0000FFFF;
    3014  return c;
    3015 }
    3016 
    3017 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3018 // Use types like uint32_t, uint64_t as T.
    3019 template <typename T>
    3020 static inline T VmaAlignUp(T val, T align)
    3021 {
    3022  return (val + align - 1) / align * align;
    3023 }
    3024 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3025 // Use types like uint32_t, uint64_t as T.
    3026 template <typename T>
    3027 static inline T VmaAlignDown(T val, T align)
    3028 {
    3029  return val / align * align;
    3030 }
    3031 
    3032 // Division with mathematical rounding to nearest number.
    3033 template <typename T>
    3034 static inline T VmaRoundDiv(T x, T y)
    3035 {
    3036  return (x + (y / (T)2)) / y;
    3037 }
    3038 
    3039 /*
    3040 Returns true if given number is a power of two.
    3041 T must be unsigned integer number or signed integer but always nonnegative.
    3042 For 0 returns true.
    3043 */
    3044 template <typename T>
    3045 inline bool VmaIsPow2(T x)
    3046 {
    3047  return (x & (x-1)) == 0;
    3048 }
    3049 
    3050 // Returns smallest power of 2 greater or equal to v.
    3051 static inline uint32_t VmaNextPow2(uint32_t v)
    3052 {
    3053  v--;
    3054  v |= v >> 1;
    3055  v |= v >> 2;
    3056  v |= v >> 4;
    3057  v |= v >> 8;
    3058  v |= v >> 16;
    3059  v++;
    3060  return v;
    3061 }
    3062 static inline uint64_t VmaNextPow2(uint64_t v)
    3063 {
    3064  v--;
    3065  v |= v >> 1;
    3066  v |= v >> 2;
    3067  v |= v >> 4;
    3068  v |= v >> 8;
    3069  v |= v >> 16;
    3070  v |= v >> 32;
    3071  v++;
    3072  return v;
    3073 }
    3074 
    3075 // Returns largest power of 2 less or equal to v.
    3076 static inline uint32_t VmaPrevPow2(uint32_t v)
    3077 {
    3078  v |= v >> 1;
    3079  v |= v >> 2;
    3080  v |= v >> 4;
    3081  v |= v >> 8;
    3082  v |= v >> 16;
    3083  v = v ^ (v >> 1);
    3084  return v;
    3085 }
    3086 static inline uint64_t VmaPrevPow2(uint64_t v)
    3087 {
    3088  v |= v >> 1;
    3089  v |= v >> 2;
    3090  v |= v >> 4;
    3091  v |= v >> 8;
    3092  v |= v >> 16;
    3093  v |= v >> 32;
    3094  v = v ^ (v >> 1);
    3095  return v;
    3096 }
    3097 
    3098 static inline bool VmaStrIsEmpty(const char* pStr)
    3099 {
    3100  return pStr == VMA_NULL || *pStr == '\0';
    3101 }
    3102 
    3103 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3104 {
    3105  switch(algorithm)
    3106  {
    3108  return "Linear";
    3110  return "Buddy";
    3111  case 0:
    3112  return "Default";
    3113  default:
    3114  VMA_ASSERT(0);
    3115  return "";
    3116  }
    3117 }
    3118 
    3119 #ifndef VMA_SORT
    3120 
// Lomuto-style partition step used by VmaQuickSort below.
// Uses the last element of [beg, end) as the pivot and returns an iterator
// to the pivot's final, sorted position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // Pivot = last element of the range.
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        // Move every element ordered before the pivot to the front of the range.
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Place the pivot between the "less" and "not less" partitions.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
    3143 
    3144 template<typename Iterator, typename Compare>
    3145 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3146 {
    3147  if(beg < end)
    3148  {
    3149  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3150  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3151  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3152  }
    3153 }
    3154 
    3155 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3156 
    3157 #endif // #ifndef VMA_SORT
    3158 
    3159 /*
    3160 Returns true if two memory blocks occupy overlapping pages.
    3161 ResourceA must be in less memory offset than ResourceB.
    3162 
    3163 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3164 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3165 */
    3166 static inline bool VmaBlocksOnSamePage(
    3167  VkDeviceSize resourceAOffset,
    3168  VkDeviceSize resourceASize,
    3169  VkDeviceSize resourceBOffset,
    3170  VkDeviceSize pageSize)
    3171 {
    3172  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3173  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3174  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3175  VkDeviceSize resourceBStart = resourceBOffset;
    3176  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3177  return resourceAEndPage == resourceBStartPage;
    3178 }
    3179 
// Kind of resource occupying a suballocation. Used by
// VmaIsBufferImageGranularityConflict (below) to decide whether two adjacent
// suballocations must respect VkPhysicalDeviceLimits::bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,           // Unused region.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,        // Resource kind unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,  // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3190 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // The relation is symmetric - normalize so that suballocType1 <= suballocType2.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts with anything.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Could be anything - assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        // Buffer conflicts with images that are (or may be) OPTIMAL-tiled.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown-tiling image: conflicts with any image and with itself.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Two optimal images never conflict with each other.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
    3231 
    3232 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3233 {
    3234  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3235  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3236  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3237  {
    3238  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3239  }
    3240 }
    3241 
    3242 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3243 {
    3244  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3245  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3246  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3247  {
    3248  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3249  {
    3250  return false;
    3251  }
    3252  }
    3253  return true;
    3254 }
    3255 
    3256 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3257 struct VmaMutexLock
    3258 {
    3259  VMA_CLASS_NO_COPY(VmaMutexLock)
    3260 public:
    3261  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3262  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3263  {
    3264  if(m_pMutex)
    3265  {
    3266  m_pMutex->Lock();
    3267  }
    3268  }
    3269 
    3270  ~VmaMutexLock()
    3271  {
    3272  if(m_pMutex)
    3273  {
    3274  m_pMutex->Unlock();
    3275  }
    3276  }
    3277 
    3278 private:
    3279  VMA_MUTEX* m_pMutex;
    3280 };
    3281 
    3282 #if VMA_DEBUG_GLOBAL_MUTEX
    3283  static VMA_MUTEX gDebugGlobalMutex;
    3284  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3285 #else
    3286  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3287 #endif
    3288 
// Minimum size of a free suballocation to register it in the free suballocation collection.
// NOTE(review): smaller free regions presumably remain in the suballocation
// list but are skipped by the size-sorted free collection - confirm at usage sites.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3291 
    3292 /*
    3293 Performs binary search and returns iterator to first element that is greater or
    3294 equal to (key), according to comparison (cmp).
    3295 
    3296 Cmp should return true if first argument is less than second argument.
    3297 
    3298 Returned value is the found element, if present in the collection or place where
    3299 new element with value (key) should be inserted.
    3300 */
    3301 template <typename CmpLess, typename IterT, typename KeyT>
    3302 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3303 {
    3304  size_t down = 0, up = (end - beg);
    3305  while(down < up)
    3306  {
    3307  const size_t mid = (down + up) / 2;
    3308  if(cmp(*(beg+mid), key))
    3309  {
    3310  down = mid + 1;
    3311  }
    3312  else
    3313  {
    3314  up = mid;
    3315  }
    3316  }
    3317  return beg + down;
    3318 }
    3319 
    3321 // Memory allocation
    3322 
    3323 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3324 {
    3325  if((pAllocationCallbacks != VMA_NULL) &&
    3326  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3327  {
    3328  return (*pAllocationCallbacks->pfnAllocation)(
    3329  pAllocationCallbacks->pUserData,
    3330  size,
    3331  alignment,
    3332  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3333  }
    3334  else
    3335  {
    3336  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3337  }
    3338 }
    3339 
    3340 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3341 {
    3342  if((pAllocationCallbacks != VMA_NULL) &&
    3343  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3344  {
    3345  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3346  }
    3347  else
    3348  {
    3349  VMA_SYSTEM_FREE(ptr);
    3350  }
    3351 }
    3352 
    3353 template<typename T>
    3354 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3355 {
    3356  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3357 }
    3358 
    3359 template<typename T>
    3360 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3361 {
    3362  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3363 }
    3364 
    3365 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3366 
    3367 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3368 
// Destroys *ptr and releases its memory via VmaFree. ptr must not be null.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3375 
    3376 template<typename T>
    3377 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3378 {
    3379  if(ptr != VMA_NULL)
    3380  {
    3381  for(size_t i = count; i--; )
    3382  {
    3383  ptr[i].~T();
    3384  }
    3385  VmaFree(pAllocationCallbacks, ptr);
    3386  }
    3387 }
    3388 
// STL-compatible allocator.
// Routes all (de)allocations through VkAllocationCallbacks (see VmaMalloc /
// VmaFree); m_pCallbacks may be null, in which case the system aligned
// allocator is used.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebind constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3416 
    3417 #if VMA_USE_STL_VECTOR
    3418 
    3419 #define VmaVector std::vector
    3420 
    3421 template<typename T, typename allocatorT>
    3422 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3423 {
    3424  vec.insert(vec.begin() + index, item);
    3425 }
    3426 
    3427 template<typename T, typename allocatorT>
    3428 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3429 {
    3430  vec.erase(vec.begin() + index);
    3431 }
    3432 
    3433 #else // #if VMA_USE_STL_VECTOR
    3434 
    3435 /* Class with interface compatible with subset of std::vector.
    3436 T must be POD because constructors and destructors are not called and memcpy is
    3437 used for these objects. */
    3438 template<typename T, typename AllocatorT>
    3439 class VmaVector
    3440 {
    3441 public:
    3442  typedef T value_type;
    3443 
    3444  VmaVector(const AllocatorT& allocator) :
    3445  m_Allocator(allocator),
    3446  m_pArray(VMA_NULL),
    3447  m_Count(0),
    3448  m_Capacity(0)
    3449  {
    3450  }
    3451 
    3452  VmaVector(size_t count, const AllocatorT& allocator) :
    3453  m_Allocator(allocator),
    3454  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3455  m_Count(count),
    3456  m_Capacity(count)
    3457  {
    3458  }
    3459 
    3460  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3461  m_Allocator(src.m_Allocator),
    3462  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3463  m_Count(src.m_Count),
    3464  m_Capacity(src.m_Count)
    3465  {
    3466  if(m_Count != 0)
    3467  {
    3468  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3469  }
    3470  }
    3471 
    3472  ~VmaVector()
    3473  {
    3474  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3475  }
    3476 
    3477  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3478  {
    3479  if(&rhs != this)
    3480  {
    3481  resize(rhs.m_Count);
    3482  if(m_Count != 0)
    3483  {
    3484  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3485  }
    3486  }
    3487  return *this;
    3488  }
    3489 
    3490  bool empty() const { return m_Count == 0; }
    3491  size_t size() const { return m_Count; }
    3492  T* data() { return m_pArray; }
    3493  const T* data() const { return m_pArray; }
    3494 
    3495  T& operator[](size_t index)
    3496  {
    3497  VMA_HEAVY_ASSERT(index < m_Count);
    3498  return m_pArray[index];
    3499  }
    3500  const T& operator[](size_t index) const
    3501  {
    3502  VMA_HEAVY_ASSERT(index < m_Count);
    3503  return m_pArray[index];
    3504  }
    3505 
    3506  T& front()
    3507  {
    3508  VMA_HEAVY_ASSERT(m_Count > 0);
    3509  return m_pArray[0];
    3510  }
    3511  const T& front() const
    3512  {
    3513  VMA_HEAVY_ASSERT(m_Count > 0);
    3514  return m_pArray[0];
    3515  }
    3516  T& back()
    3517  {
    3518  VMA_HEAVY_ASSERT(m_Count > 0);
    3519  return m_pArray[m_Count - 1];
    3520  }
    3521  const T& back() const
    3522  {
    3523  VMA_HEAVY_ASSERT(m_Count > 0);
    3524  return m_pArray[m_Count - 1];
    3525  }
    3526 
    3527  void reserve(size_t newCapacity, bool freeMemory = false)
    3528  {
    3529  newCapacity = VMA_MAX(newCapacity, m_Count);
    3530 
    3531  if((newCapacity < m_Capacity) && !freeMemory)
    3532  {
    3533  newCapacity = m_Capacity;
    3534  }
    3535 
    3536  if(newCapacity != m_Capacity)
    3537  {
    3538  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3539  if(m_Count != 0)
    3540  {
    3541  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3542  }
    3543  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3544  m_Capacity = newCapacity;
    3545  m_pArray = newArray;
    3546  }
    3547  }
    3548 
    3549  void resize(size_t newCount, bool freeMemory = false)
    3550  {
    3551  size_t newCapacity = m_Capacity;
    3552  if(newCount > m_Capacity)
    3553  {
    3554  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3555  }
    3556  else if(freeMemory)
    3557  {
    3558  newCapacity = newCount;
    3559  }
    3560 
    3561  if(newCapacity != m_Capacity)
    3562  {
    3563  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3564  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3565  if(elementsToCopy != 0)
    3566  {
    3567  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3568  }
    3569  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3570  m_Capacity = newCapacity;
    3571  m_pArray = newArray;
    3572  }
    3573 
    3574  m_Count = newCount;
    3575  }
    3576 
    3577  void clear(bool freeMemory = false)
    3578  {
    3579  resize(0, freeMemory);
    3580  }
    3581 
    3582  void insert(size_t index, const T& src)
    3583  {
    3584  VMA_HEAVY_ASSERT(index <= m_Count);
    3585  const size_t oldCount = size();
    3586  resize(oldCount + 1);
    3587  if(index < oldCount)
    3588  {
    3589  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3590  }
    3591  m_pArray[index] = src;
    3592  }
    3593 
    3594  void remove(size_t index)
    3595  {
    3596  VMA_HEAVY_ASSERT(index < m_Count);
    3597  const size_t oldCount = size();
    3598  if(index < oldCount - 1)
    3599  {
    3600  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3601  }
    3602  resize(oldCount - 1);
    3603  }
    3604 
    3605  void push_back(const T& src)
    3606  {
    3607  const size_t newIndex = size();
    3608  resize(newIndex + 1);
    3609  m_pArray[newIndex] = src;
    3610  }
    3611 
    3612  void pop_back()
    3613  {
    3614  VMA_HEAVY_ASSERT(m_Count > 0);
    3615  resize(size() - 1);
    3616  }
    3617 
    3618  void push_front(const T& src)
    3619  {
    3620  insert(0, src);
    3621  }
    3622 
    3623  void pop_front()
    3624  {
    3625  VMA_HEAVY_ASSERT(m_Count > 0);
    3626  remove(0);
    3627  }
    3628 
    3629  typedef T* iterator;
    3630 
    3631  iterator begin() { return m_pArray; }
    3632  iterator end() { return m_pArray + m_Count; }
    3633 
    3634 private:
    3635  AllocatorT m_Allocator;
    3636  T* m_pArray;
    3637  size_t m_Count;
    3638  size_t m_Capacity;
    3639 };
    3640 
// Inserts item into vec at position index (VmaVector flavor).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3646 
// Removes the element at position index from vec (VmaVector flavor).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3652 
    3653 #endif // #if VMA_USE_STL_VECTOR
    3654 
    3655 template<typename CmpLess, typename VectorT>
    3656 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3657 {
    3658  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3659  vector.data(),
    3660  vector.data() + vector.size(),
    3661  value,
    3662  CmpLess()) - vector.data();
    3663  VmaVectorInsert(vector, indexToInsert, value);
    3664  return indexToInsert;
    3665 }
    3666 
    3667 template<typename CmpLess, typename VectorT>
    3668 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3669 {
    3670  CmpLess comparator;
    3671  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3672  vector.begin(),
    3673  vector.end(),
    3674  value,
    3675  comparator);
    3676  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3677  {
    3678  size_t indexToRemove = it - vector.begin();
    3679  VmaVectorRemove(vector, indexToRemove);
    3680  return true;
    3681  }
    3682  return false;
    3683 }
    3684 
    3685 template<typename CmpLess, typename IterT, typename KeyT>
    3686 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3687 {
    3688  CmpLess comparator;
    3689  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3690  beg, end, value, comparator);
    3691  if(it == end ||
    3692  (!comparator(*it, value) && !comparator(value, *it)))
    3693  {
    3694  return it;
    3695  }
    3696  return end;
    3697 }
    3698 
    3700 // class VmaPoolAllocator
    3701 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Any items still handed out become dangling.
    void Clear();
    // Returns uninitialized storage for one T; the object is NOT constructed here.
    T* Alloc();
    // Returns the slot of ptr to its block's free list; the destructor is NOT called here.
    void Free(T* ptr);

private:
    // A slot is either a link in the block's free list or a live object.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One contiguous array of slots plus the head of its free list.
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3737 
// Stores the allocation callbacks and per-block item count; no block memory is
// allocated until the first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3746 
// Releases all blocks.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3752 
    3753 template<typename T>
    3754 void VmaPoolAllocator<T>::Clear()
    3755 {
    3756  for(size_t i = m_ItemBlocks.size(); i--; )
    3757  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3758  m_ItemBlocks.clear();
    3759 }
    3760 
    3761 template<typename T>
    3762 T* VmaPoolAllocator<T>::Alloc()
    3763 {
    3764  for(size_t i = m_ItemBlocks.size(); i--; )
    3765  {
    3766  ItemBlock& block = m_ItemBlocks[i];
    3767  // This block has some free items: Use first one.
    3768  if(block.FirstFreeIndex != UINT32_MAX)
    3769  {
    3770  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3771  block.FirstFreeIndex = pItem->NextFreeIndex;
    3772  return &pItem->Value;
    3773  }
    3774  }
    3775 
    3776  // No block has free item: Create new one and use it.
    3777  ItemBlock& newBlock = CreateNewBlock();
    3778  Item* const pItem = &newBlock.pItems[0];
    3779  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3780  return &pItem->Value;
    3781 }
    3782 
// Returns the slot holding *ptr to its owning block's free list.
// Note: the object's destructor is NOT called here.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy instead of a pointer cast avoids type-punning warnings;
        // Item::Value and the Item share the same address.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto the front of this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    3806 
    3807 template<typename T>
    3808 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3809 {
    3810  ItemBlock newBlock = {
    3811  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3812 
    3813  m_ItemBlocks.push_back(newBlock);
    3814 
    3815  // Setup singly-linked list of all free items in this block.
    3816  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3817  newBlock.pItems[i].NextFreeIndex = i + 1;
    3818  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3819  return m_ItemBlocks.back();
    3820 }
    3821 
    3823 // class VmaRawList, VmaList
    3824 
    3825 #if VMA_USE_STL_LIST
    3826 
    3827 #define VmaList std::list
    3828 
    3829 #else // #if VMA_USE_STL_LIST
    3830 
// Node of the doubly linked list VmaRawList: intrusive prev/next links plus the payload.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Previous node, or VMA_NULL at the front of the list.
    VmaListItem* pNext; // Next node, or VMA_NULL at the back of the list.
    T Value;            // Stored element.
};
    3838 
// Doubly linked list.
// Low-level list of T whose nodes come from an internal VmaPoolAllocator,
// avoiding a heap allocation per node. VmaList wraps this with an
// STL-like iterator interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool from which list nodes are allocated.
    ItemType* m_pFront; // First node, or VMA_NULL when the list is empty.
    ItemType* m_pBack;  // Last node, or VMA_NULL when the list is empty.
    size_t m_Count;     // Number of nodes currently in the list.
};
    3883 
// Constructs an empty list; nodes will be pool-allocated in chunks of 128.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128), // 128 list nodes per pool block.
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3893 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's own destructor releases all node memory in bulk.
}
    3900 
    3901 template<typename T>
    3902 void VmaRawList<T>::Clear()
    3903 {
    3904  if(IsEmpty() == false)
    3905  {
    3906  ItemType* pItem = m_pBack;
    3907  while(pItem != VMA_NULL)
    3908  {
    3909  ItemType* const pPrevItem = pItem->pPrev;
    3910  m_ItemAllocator.Free(pItem);
    3911  pItem = pPrevItem;
    3912  }
    3913  m_pFront = VMA_NULL;
    3914  m_pBack = VMA_NULL;
    3915  m_Count = 0;
    3916  }
    3917 }
    3918 
    3919 template<typename T>
    3920 VmaListItem<T>* VmaRawList<T>::PushBack()
    3921 {
    3922  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3923  pNewItem->pNext = VMA_NULL;
    3924  if(IsEmpty())
    3925  {
    3926  pNewItem->pPrev = VMA_NULL;
    3927  m_pFront = pNewItem;
    3928  m_pBack = pNewItem;
    3929  m_Count = 1;
    3930  }
    3931  else
    3932  {
    3933  pNewItem->pPrev = m_pBack;
    3934  m_pBack->pNext = pNewItem;
    3935  m_pBack = pNewItem;
    3936  ++m_Count;
    3937  }
    3938  return pNewItem;
    3939 }
    3940 
    3941 template<typename T>
    3942 VmaListItem<T>* VmaRawList<T>::PushFront()
    3943 {
    3944  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3945  pNewItem->pPrev = VMA_NULL;
    3946  if(IsEmpty())
    3947  {
    3948  pNewItem->pNext = VMA_NULL;
    3949  m_pFront = pNewItem;
    3950  m_pBack = pNewItem;
    3951  m_Count = 1;
    3952  }
    3953  else
    3954  {
    3955  pNewItem->pNext = m_pFront;
    3956  m_pFront->pPrev = pNewItem;
    3957  m_pFront = pNewItem;
    3958  ++m_Count;
    3959  }
    3960  return pNewItem;
    3961 }
    3962 
    3963 template<typename T>
    3964 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    3965 {
    3966  ItemType* const pNewItem = PushBack();
    3967  pNewItem->Value = value;
    3968  return pNewItem;
    3969 }
    3970 
    3971 template<typename T>
    3972 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    3973 {
    3974  ItemType* const pNewItem = PushFront();
    3975  pNewItem->Value = value;
    3976  return pNewItem;
    3977 }
    3978 
    3979 template<typename T>
    3980 void VmaRawList<T>::PopBack()
    3981 {
    3982  VMA_HEAVY_ASSERT(m_Count > 0);
    3983  ItemType* const pBackItem = m_pBack;
    3984  ItemType* const pPrevItem = pBackItem->pPrev;
    3985  if(pPrevItem != VMA_NULL)
    3986  {
    3987  pPrevItem->pNext = VMA_NULL;
    3988  }
    3989  m_pBack = pPrevItem;
    3990  m_ItemAllocator.Free(pBackItem);
    3991  --m_Count;
    3992 }
    3993 
    3994 template<typename T>
    3995 void VmaRawList<T>::PopFront()
    3996 {
    3997  VMA_HEAVY_ASSERT(m_Count > 0);
    3998  ItemType* const pFrontItem = m_pFront;
    3999  ItemType* const pNextItem = pFrontItem->pNext;
    4000  if(pNextItem != VMA_NULL)
    4001  {
    4002  pNextItem->pPrev = VMA_NULL;
    4003  }
    4004  m_pFront = pNextItem;
    4005  m_ItemAllocator.Free(pFrontItem);
    4006  --m_Count;
    4007 }
    4008 
    4009 template<typename T>
    4010 void VmaRawList<T>::Remove(ItemType* pItem)
    4011 {
    4012  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4013  VMA_HEAVY_ASSERT(m_Count > 0);
    4014 
    4015  if(pItem->pPrev != VMA_NULL)
    4016  {
    4017  pItem->pPrev->pNext = pItem->pNext;
    4018  }
    4019  else
    4020  {
    4021  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4022  m_pFront = pItem->pNext;
    4023  }
    4024 
    4025  if(pItem->pNext != VMA_NULL)
    4026  {
    4027  pItem->pNext->pPrev = pItem->pPrev;
    4028  }
    4029  else
    4030  {
    4031  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4032  m_pBack = pItem->pPrev;
    4033  }
    4034 
    4035  m_ItemAllocator.Free(pItem);
    4036  --m_Count;
    4037 }
    4038 
    4039 template<typename T>
    4040 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4041 {
    4042  if(pItem != VMA_NULL)
    4043  {
    4044  ItemType* const prevItem = pItem->pPrev;
    4045  ItemType* const newItem = m_ItemAllocator.Alloc();
    4046  newItem->pPrev = prevItem;
    4047  newItem->pNext = pItem;
    4048  pItem->pPrev = newItem;
    4049  if(prevItem != VMA_NULL)
    4050  {
    4051  prevItem->pNext = newItem;
    4052  }
    4053  else
    4054  {
    4055  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4056  m_pFront = newItem;
    4057  }
    4058  ++m_Count;
    4059  return newItem;
    4060  }
    4061  else
    4062  return PushBack();
    4063 }
    4064 
    4065 template<typename T>
    4066 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4067 {
    4068  if(pItem != VMA_NULL)
    4069  {
    4070  ItemType* const nextItem = pItem->pNext;
    4071  ItemType* const newItem = m_ItemAllocator.Alloc();
    4072  newItem->pNext = nextItem;
    4073  newItem->pPrev = pItem;
    4074  pItem->pNext = newItem;
    4075  if(nextItem != VMA_NULL)
    4076  {
    4077  nextItem->pPrev = newItem;
    4078  }
    4079  else
    4080  {
    4081  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4082  m_pBack = newItem;
    4083  }
    4084  ++m_Count;
    4085  return newItem;
    4086  }
    4087  else
    4088  return PushFront();
    4089 }
    4090 
    4091 template<typename T>
    4092 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4093 {
    4094  ItemType* const newItem = InsertBefore(pItem);
    4095  newItem->Value = value;
    4096  return newItem;
    4097 }
    4098 
    4099 template<typename T>
    4100 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4101 {
    4102  ItemType* const newItem = InsertAfter(pItem);
    4103  newItem->Value = value;
    4104  return newItem;
    4105 }
    4106 
/*
STL-like wrapper over VmaRawList, providing a subset of the std::list interface
(begin/end, push_back, insert, erase, clear). AllocatorT must expose a
m_pCallbacks member (VkAllocationCallbacks*), as VmaStlAllocator does.
end() is represented by an iterator whose item pointer is VMA_NULL.
*/
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator over the list.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() yields the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparing iterators from different lists is asserted against.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // VMA_NULL means end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing end() yields the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // VMA_NULL means cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4291 
    4292 #endif // #if VMA_USE_STL_LIST
    4293 
    4295 // class VmaMap
    4296 
    4297 // Unused in this version.
    4298 #if 0
    4299 
    4300 #if VMA_USE_STL_UNORDERED_MAP
    4301 
    4302 #define VmaPair std::pair
    4303 
    4304 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4305  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4306 
    4307 #else // #if VMA_USE_STL_UNORDERED_MAP
    4308 
// Simple POD-friendly replacement for std::pair, used as the element type of VmaMap.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4318 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a flat vector of pairs kept sorted by key; lookup and insertion
use binary search (see insert/find below).
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Iterators are raw pointers into the underlying vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4341 
    4342 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4343 
// Orders pairs (and bare keys) by pair.first; comparator for the binary
// searches performed by VmaMap.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4356 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    // Binary-search the position that keeps the vector sorted by key,
    // then insert there. Duplicate keys are not rejected here.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4367 
// Returns an iterator to the element with the given key, or end() if absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    // Binary search for the first element whose key is not less than `key`...
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    // ...then verify it is an exact match; otherwise report "not found".
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4385 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    // Convert the iterator (raw pointer) to an index and remove that element,
    // shifting later elements down.
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4391 
    4392 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4393 
    4394 #endif // #if 0
    4395 
    4397 
    4398 class VmaDeviceMemoryBlock;
    4399 
    4400 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4401 
/*
Internal representation of a single allocation handed out as VmaAllocation.
An allocation is in exactly one of two storage states (see the union below):
- ALLOCATION_TYPE_BLOCK: a suballocation inside a VmaDeviceMemoryBlock.
- ALLOCATION_TYPE_DEDICATED: owns its own private VkDeviceMemory.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // High bit of m_MapCount: set when created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01, // m_pUserData points to an owned string copy.
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Creates an allocation in the NONE state; one of the Init* methods below
    // must be called before it is usable.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Transitions NONE -> BLOCK: this allocation becomes a suballocation of `block`.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Transitions NONE -> BLOCK in the "lost" state: no backing block, frame
    // index must already be VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    // Transitions NONE -> DEDICATED: this allocation owns hMemory.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Only valid for BLOCK allocations.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4618 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset of this region from the start of the block.
    VkDeviceSize size;   // Size of the region in bytes.
    VmaAllocation hAllocation; // Owning allocation; presumably null for a FREE region - confirm against metadata code.
    VmaSuballocationType type;
};
    4630 
// Comparator for offsets.
// Strict weak ordering of suballocations by ascending offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Strict weak ordering of suballocations by descending offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    4646 
    4647 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    4648 
    4649 // Cost of one additional allocation lost, as equivalent in bytes.
    4650 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4651 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Algorithm-specific payload; meaning depends on the metadata implementation.

    // Cost of realizing this request, in bytes: size of allocations sacrificed
    // plus a fixed penalty (VMA_LOST_ALLOCATION_COST) per allocation made lost.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4679 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class: concrete subclasses (e.g. VmaBlockMetadata_Generic)
implement the actual placement algorithm.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for subclasses to emit the common JSON structure of a block dump.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4765 
// Helper for the Validate() implementations of the metadata classes below:
// on a failed condition it triggers VMA_ASSERT (with the stringized condition
// in the message) and makes the enclosing function return false, so the
// failure both traps in debug builds and propagates to the caller.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4770 
/*
General-purpose block metadata algorithm.

Every region of the memory block - allocations as well as free ranges - is a
node in the linked list m_Suballocations. Free nodes above a size threshold
are additionally kept in m_FreeSuballocationsBySize, sorted by size ascending,
presumably to speed up the search for a free range to allocate from (confirm
against the CreateAllocationRequest implementation).
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Every list node that is not free is a live allocation.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    // Number of free nodes currently in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all free suballocations (cached, returned by GetSumFreeSize()).
    VkDeviceSize m_SumFreeSize;
    // All regions of the block, used and free, presumably ordered by offset - confirm in Validate().
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4861 
    4862 /*
    4863 Allocations and their references in internal data structure look like this:
    4864 
    4865 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4866 
    4867  0 +-------+
    4868  | |
    4869  | |
    4870  | |
    4871  +-------+
    4872  | Alloc | 1st[m_1stNullItemsBeginCount]
    4873  +-------+
    4874  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4875  +-------+
    4876  | ... |
    4877  +-------+
    4878  | Alloc | 1st[1st.size() - 1]
    4879  +-------+
    4880  | |
    4881  | |
    4882  | |
    4883 GetSize() +-------+
    4884 
    4885 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4886 
    4887  0 +-------+
    4888  | Alloc | 2nd[0]
    4889  +-------+
    4890  | Alloc | 2nd[1]
    4891  +-------+
    4892  | ... |
    4893  +-------+
    4894  | Alloc | 2nd[2nd.size() - 1]
    4895  +-------+
    4896  | |
    4897  | |
    4898  | |
    4899  +-------+
    4900  | Alloc | 1st[m_1stNullItemsBeginCount]
    4901  +-------+
    4902  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4903  +-------+
    4904  | ... |
    4905  +-------+
    4906  | Alloc | 1st[1st.size() - 1]
    4907  +-------+
    4908  | |
    4909 GetSize() +-------+
    4910 
    4911 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4912 
    4913  0 +-------+
    4914  | |
    4915  | |
    4916  | |
    4917  +-------+
    4918  | Alloc | 1st[m_1stNullItemsBeginCount]
    4919  +-------+
    4920  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4921  +-------+
    4922  | ... |
    4923  +-------+
    4924  | Alloc | 1st[1st.size() - 1]
    4925  +-------+
    4926  | |
    4927  | |
    4928  | |
    4929  +-------+
    4930  | Alloc | 2nd[2nd.size() - 1]
    4931  +-------+
    4932  | ... |
    4933  +-------+
    4934  | Alloc | 2nd[1]
    4935  +-------+
    4936  | Alloc | 2nd[0]
    4937 GetSize() +-------+
    4938 
    4939 */
/*
Linear block metadata algorithm. Instead of a linked list it stores
suballocations in two vectors used in ping-pong fashion (see the big diagram
in the comment above for the three layouts: empty 2nd vector, ring buffer,
and double stack).
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    // Sum of sizes of all free space in the block (cached, returned by GetSumFreeSize()).
    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    // Selects which of the two vectors above currently plays the role of "1st".
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5038 
    5039 /*
    5040 - GetSize() is the original size of allocated memory block.
    5041 - m_UsableSize is this size aligned down to a power of two.
    5042  All allocations and calculations happen relative to m_UsableSize.
    5043 - GetUnusableSize() is the difference between them.
  It is reported as separate, unused range, not available for allocations.
    5045 
    5046 Node at level 0 has size = m_UsableSize.
    5047 Each next level contains nodes with size 2 times smaller than current level.
    5048 m_LevelCount is the maximum number of levels to use in the current object.
    5049 */
/*
Buddy-allocator block metadata algorithm: a binary tree of nodes where the
root covers m_UsableSize (block size rounded down to a power of two) and each
level contains nodes half the size of the previous one, down to at most
MAX_LEVELS levels / MIN_NODE_SIZE bytes. Free nodes of each level are linked
into a per-level free list (m_FreeList).
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Unusable tail (size not covered by the power-of-two m_UsableSize) is counted as free.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty exactly when the root node is one big free range.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled by ValidateNode() and compared against the cached counters.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree. The union member in use depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            // Valid for TYPE_FREE: links in the per-level free list.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // Valid for TYPE_ALLOCATION.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // Valid for TYPE_SPLIT; the right child is leftChild->buddy.
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves with each level below the root.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5186 
    5187 /*
    5188 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5189 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5190 
    5191 Thread-safety: This class must be externally synchronized.
    5192 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Suballocation bookkeeping for this block; concrete algorithm chosen in Init().
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    // `count` adjusts the map reference count - presumably by that many
    // references; confirm against the implementation.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5255 
    5256 struct VmaPointerLess
    5257 {
    5258  bool operator()(const void* lhs, const void* rhs) const
    5259  {
    5260  return lhs < rhs;
    5261  }
    5262 };
    5263 
// Forward declaration; full definition appears later in this file.
class VmaDefragmentator;

/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount blocks.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates m_pDefragmentator if not created yet.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related state ("synchronized internally", see class comment).
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5384 
// Internal representation of a custom pool (the VmaPool handle type):
// a thin wrapper around one VmaBlockVector plus a numeric id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id can be assigned only once, after construction (asserted below).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5407 
/*
Compacts allocations within a single VmaBlockVector by moving them between
its blocks. Usage: register candidates with AddAllocation(), then call
Defragment() with move budgets; results are queried via GetBytesMoved() /
GetAllocationsMoved().
*/
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Totals accumulated across Defragment() work.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation; *m_pChanged is the
    // caller-visible "was moved" flag (may be null).
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations by size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // Block has non-movable allocations iff not all of its allocations
        // were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): "Descecnding" is a typo for "Descending"; the
        // identifier is kept as-is to avoid breaking out-of-line callers.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Comparators for locating a BlockInfo by its underlying block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5537 
#if VMA_RECORDING_ENABLED

/*
Records a trace of allocator API calls to a file (m_File), one Record* method
per public allocator entry point, for offline replay/analysis. Optionally
synchronized with m_FileMutex when Init() is called with useMutex = true.
*/
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device the trace was captured on.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Common per-call data written with every record.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats user data for output: either the string itself or the pointer
    // rendered into m_PtrStr (16 hex digits + terminator).
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;
    // Timer frequency / start counter used to compute CallParams::time.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};

#endif // #if VMA_RECORDING_ENABLED
    5637 
// Main allocator object.
// One instance is created per vmaCreateAllocator() call and referenced through
// the opaque VmaAllocator handle.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex; // When false, internal mutexes are bypassed (caller guarantees external synchronization).
    bool m_UseKhrDedicatedAllocation; // True when usage of VK_KHR_dedicated_allocation is enabled.
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified; // True if the user supplied CPU allocation callbacks.
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided CPU allocation callbacks, or null to use system defaults.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Granularity to respect between linear and optimal resources,
    // never less than VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    // Such types require manual flush/invalidate around CPU access.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types additionally respect nonCoherentAtomSize so that
    // flush/invalidate ranges can be aligned correctly.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level wrappers around vkAllocateMemory/vkFreeMemory.
    // NOTE(review): presumably these also account against m_HeapSizeLimit - confirm in implementation.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern.
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5835 
    5837 // Memory allocation #2 after VmaAllocator_T definition
    5838 
    5839 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5840 {
    5841  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5842 }
    5843 
    5844 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5845 {
    5846  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5847 }
    5848 
    5849 template<typename T>
    5850 static T* VmaAllocate(VmaAllocator hAllocator)
    5851 {
    5852  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5853 }
    5854 
    5855 template<typename T>
    5856 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5857 {
    5858  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5859 }
    5860 
    5861 template<typename T>
    5862 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5863 {
    5864  if(ptr != VMA_NULL)
    5865  {
    5866  ptr->~T();
    5867  VmaFree(hAllocator, ptr);
    5868  }
    5869 }
    5870 
    5871 template<typename T>
    5872 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5873 {
    5874  if(ptr != VMA_NULL)
    5875  {
    5876  for(size_t i = count; i--; )
    5877  ptr[i].~T();
    5878  VmaFree(hAllocator, ptr);
    5879  }
    5880 }
    5881 
    5883 // VmaStringBuilder
    5884 
    5885 #if VMA_STATS_STRING_ENABLED
    5886 
// Accumulates characters into a growable buffer using the allocator's CPU
// allocation callbacks. The buffer is NOT null-terminated - consume it through
// GetData() together with GetLength().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num); // Appends decimal representation.
    void AddNumber(uint64_t num); // Appends decimal representation.
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5904 
    5905 void VmaStringBuilder::Add(const char* pStr)
    5906 {
    5907  const size_t strLen = strlen(pStr);
    5908  if(strLen > 0)
    5909  {
    5910  const size_t oldCount = m_Data.size();
    5911  m_Data.resize(oldCount + strLen);
    5912  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5913  }
    5914 }
    5915 
    5916 void VmaStringBuilder::AddNumber(uint32_t num)
    5917 {
    5918  char buf[11];
    5919  VmaUint32ToStr(buf, sizeof(buf), num);
    5920  Add(buf);
    5921 }
    5922 
    5923 void VmaStringBuilder::AddNumber(uint64_t num)
    5924 {
    5925  char buf[21];
    5926  VmaUint64ToStr(buf, sizeof(buf), num);
    5927  Add(buf);
    5928 }
    5929 
    5930 void VmaStringBuilder::AddPointer(const void* ptr)
    5931 {
    5932  char buf[21];
    5933  VmaPtrToStr(buf, sizeof(buf), ptr);
    5934  Add(buf);
    5935 }
    5936 
    5937 #endif // #if VMA_STATS_STRING_ENABLED
    5938 
    5940 // VmaJsonWriter
    5941 
    5942 #if VMA_STATS_STRING_ENABLED
    5943 
// Writes pretty-printed JSON into a VmaStringBuilder.
// Usage protocol: Begin/End calls must balance (checked by asserts in the
// destructor), and inside an object, values alternate key-string / value.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value (or object key) in one call.
    void WriteString(const char* pStr);
    // Alternative API to compose a string value from multiple parts:
    // BeginString, then any number of ContinueString calls, then EndString.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // String prepended once per nesting level when pretty-printing.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount; // In objects this counts keys and values together.
        bool singleLineMode; // When true, no newlines/indentation inside this collection.
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString; // True between BeginString and EndString.

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    5992 
// Indentation unit emitted once per open collection level.
const char* const VmaJsonWriter::INDENT = " ";

VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

VmaJsonWriter::~VmaJsonWriter()
{
    // All strings and collections must be closed before the writer is destroyed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6007 
    6008 void VmaJsonWriter::BeginObject(bool singleLine)
    6009 {
    6010  VMA_ASSERT(!m_InsideString);
    6011 
    6012  BeginValue(false);
    6013  m_SB.Add('{');
    6014 
    6015  StackItem item;
    6016  item.type = COLLECTION_TYPE_OBJECT;
    6017  item.valueCount = 0;
    6018  item.singleLineMode = singleLine;
    6019  m_Stack.push_back(item);
    6020 }
    6021 
    6022 void VmaJsonWriter::EndObject()
    6023 {
    6024  VMA_ASSERT(!m_InsideString);
    6025 
    6026  WriteIndent(true);
    6027  m_SB.Add('}');
    6028 
    6029  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6030  m_Stack.pop_back();
    6031 }
    6032 
    6033 void VmaJsonWriter::BeginArray(bool singleLine)
    6034 {
    6035  VMA_ASSERT(!m_InsideString);
    6036 
    6037  BeginValue(false);
    6038  m_SB.Add('[');
    6039 
    6040  StackItem item;
    6041  item.type = COLLECTION_TYPE_ARRAY;
    6042  item.valueCount = 0;
    6043  item.singleLineMode = singleLine;
    6044  m_Stack.push_back(item);
    6045 }
    6046 
    6047 void VmaJsonWriter::EndArray()
    6048 {
    6049  VMA_ASSERT(!m_InsideString);
    6050 
    6051  WriteIndent(true);
    6052  m_SB.Add(']');
    6053 
    6054  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6055  m_Stack.pop_back();
    6056 }
    6057 
    6058 void VmaJsonWriter::WriteString(const char* pStr)
    6059 {
    6060  BeginString(pStr);
    6061  EndString();
    6062 }
    6063 
    6064 void VmaJsonWriter::BeginString(const char* pStr)
    6065 {
    6066  VMA_ASSERT(!m_InsideString);
    6067 
    6068  BeginValue(true);
    6069  m_SB.Add('"');
    6070  m_InsideString = true;
    6071  if(pStr != VMA_NULL && pStr[0] != '\0')
    6072  {
    6073  ContinueString(pStr);
    6074  }
    6075 }
    6076 
    6077 void VmaJsonWriter::ContinueString(const char* pStr)
    6078 {
    6079  VMA_ASSERT(m_InsideString);
    6080 
    6081  const size_t strLen = strlen(pStr);
    6082  for(size_t i = 0; i < strLen; ++i)
    6083  {
    6084  char ch = pStr[i];
    6085  if(ch == '\\')
    6086  {
    6087  m_SB.Add("\\\\");
    6088  }
    6089  else if(ch == '"')
    6090  {
    6091  m_SB.Add("\\\"");
    6092  }
    6093  else if(ch >= 32)
    6094  {
    6095  m_SB.Add(ch);
    6096  }
    6097  else switch(ch)
    6098  {
    6099  case '\b':
    6100  m_SB.Add("\\b");
    6101  break;
    6102  case '\f':
    6103  m_SB.Add("\\f");
    6104  break;
    6105  case '\n':
    6106  m_SB.Add("\\n");
    6107  break;
    6108  case '\r':
    6109  m_SB.Add("\\r");
    6110  break;
    6111  case '\t':
    6112  m_SB.Add("\\t");
    6113  break;
    6114  default:
    6115  VMA_ASSERT(0 && "Character not currently supported.");
    6116  break;
    6117  }
    6118  }
    6119 }
    6120 
    6121 void VmaJsonWriter::ContinueString(uint32_t n)
    6122 {
    6123  VMA_ASSERT(m_InsideString);
    6124  m_SB.AddNumber(n);
    6125 }
    6126 
    6127 void VmaJsonWriter::ContinueString(uint64_t n)
    6128 {
    6129  VMA_ASSERT(m_InsideString);
    6130  m_SB.AddNumber(n);
    6131 }
    6132 
    6133 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    6134 {
    6135  VMA_ASSERT(m_InsideString);
    6136  m_SB.AddPointer(ptr);
    6137 }
    6138 
    6139 void VmaJsonWriter::EndString(const char* pStr)
    6140 {
    6141  VMA_ASSERT(m_InsideString);
    6142  if(pStr != VMA_NULL && pStr[0] != '\0')
    6143  {
    6144  ContinueString(pStr);
    6145  }
    6146  m_SB.Add('"');
    6147  m_InsideString = false;
    6148 }
    6149 
    6150 void VmaJsonWriter::WriteNumber(uint32_t n)
    6151 {
    6152  VMA_ASSERT(!m_InsideString);
    6153  BeginValue(false);
    6154  m_SB.AddNumber(n);
    6155 }
    6156 
    6157 void VmaJsonWriter::WriteNumber(uint64_t n)
    6158 {
    6159  VMA_ASSERT(!m_InsideString);
    6160  BeginValue(false);
    6161  m_SB.AddNumber(n);
    6162 }
    6163 
    6164 void VmaJsonWriter::WriteBool(bool b)
    6165 {
    6166  VMA_ASSERT(!m_InsideString);
    6167  BeginValue(false);
    6168  m_SB.Add(b ? "true" : "false");
    6169 }
    6170 
    6171 void VmaJsonWriter::WriteNull()
    6172 {
    6173  VMA_ASSERT(!m_InsideString);
    6174  BeginValue(false);
    6175  m_SB.Add("null");
    6176 }
    6177 
    6178 void VmaJsonWriter::BeginValue(bool isString)
    6179 {
    6180  if(!m_Stack.empty())
    6181  {
    6182  StackItem& currItem = m_Stack.back();
    6183  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6184  currItem.valueCount % 2 == 0)
    6185  {
    6186  VMA_ASSERT(isString);
    6187  }
    6188 
    6189  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6190  currItem.valueCount % 2 != 0)
    6191  {
    6192  m_SB.Add(": ");
    6193  }
    6194  else if(currItem.valueCount > 0)
    6195  {
    6196  m_SB.Add(", ");
    6197  WriteIndent();
    6198  }
    6199  else
    6200  {
    6201  WriteIndent();
    6202  }
    6203  ++currItem.valueCount;
    6204  }
    6205 }
    6206 
    6207 void VmaJsonWriter::WriteIndent(bool oneLess)
    6208 {
    6209  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6210  {
    6211  m_SB.AddNewLine();
    6212 
    6213  size_t count = m_Stack.size();
    6214  if(count > 0 && oneLess)
    6215  {
    6216  --count;
    6217  }
    6218  for(size_t i = 0; i < count; ++i)
    6219  {
    6220  m_SB.Add(INDENT);
    6221  }
    6222  }
    6223 }
    6224 
    6225 #endif // #if VMA_STATS_STRING_ENABLED
    6226 
    6228 
    6229 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6230 {
    6231  if(IsUserDataString())
    6232  {
    6233  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6234 
    6235  FreeUserDataString(hAllocator);
    6236 
    6237  if(pUserData != VMA_NULL)
    6238  {
    6239  const char* const newStrSrc = (char*)pUserData;
    6240  const size_t newStrLen = strlen(newStrSrc);
    6241  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6242  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6243  m_pUserData = newStrDst;
    6244  }
    6245  }
    6246  else
    6247  {
    6248  m_pUserData = pUserData;
    6249  }
    6250 }
    6251 
    6252 void VmaAllocation_T::ChangeBlockAllocation(
    6253  VmaAllocator hAllocator,
    6254  VmaDeviceMemoryBlock* block,
    6255  VkDeviceSize offset)
    6256 {
    6257  VMA_ASSERT(block != VMA_NULL);
    6258  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6259 
    6260  // Move mapping reference counter from old block to new block.
    6261  if(block != m_BlockAllocation.m_Block)
    6262  {
    6263  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    6264  if(IsPersistentMap())
    6265  ++mapRefCount;
    6266  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    6267  block->Map(hAllocator, mapRefCount, VMA_NULL);
    6268  }
    6269 
    6270  m_BlockAllocation.m_Block = block;
    6271  m_BlockAllocation.m_Offset = offset;
    6272 }
    6273 
    6274 VkDeviceSize VmaAllocation_T::GetOffset() const
    6275 {
    6276  switch(m_Type)
    6277  {
    6278  case ALLOCATION_TYPE_BLOCK:
    6279  return m_BlockAllocation.m_Offset;
    6280  case ALLOCATION_TYPE_DEDICATED:
    6281  return 0;
    6282  default:
    6283  VMA_ASSERT(0);
    6284  return 0;
    6285  }
    6286 }
    6287 
    6288 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6289 {
    6290  switch(m_Type)
    6291  {
    6292  case ALLOCATION_TYPE_BLOCK:
    6293  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6294  case ALLOCATION_TYPE_DEDICATED:
    6295  return m_DedicatedAllocation.m_hMemory;
    6296  default:
    6297  VMA_ASSERT(0);
    6298  return VK_NULL_HANDLE;
    6299  }
    6300 }
    6301 
    6302 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6303 {
    6304  switch(m_Type)
    6305  {
    6306  case ALLOCATION_TYPE_BLOCK:
    6307  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6308  case ALLOCATION_TYPE_DEDICATED:
    6309  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6310  default:
    6311  VMA_ASSERT(0);
    6312  return UINT32_MAX;
    6313  }
    6314 }
    6315 
    6316 void* VmaAllocation_T::GetMappedData() const
    6317 {
    6318  switch(m_Type)
    6319  {
    6320  case ALLOCATION_TYPE_BLOCK:
    6321  if(m_MapCount != 0)
    6322  {
    6323  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6324  VMA_ASSERT(pBlockData != VMA_NULL);
    6325  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6326  }
    6327  else
    6328  {
    6329  return VMA_NULL;
    6330  }
    6331  break;
    6332  case ALLOCATION_TYPE_DEDICATED:
    6333  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6334  return m_DedicatedAllocation.m_pMappedData;
    6335  default:
    6336  VMA_ASSERT(0);
    6337  return VMA_NULL;
    6338  }
    6339 }
    6340 
    6341 bool VmaAllocation_T::CanBecomeLost() const
    6342 {
    6343  switch(m_Type)
    6344  {
    6345  case ALLOCATION_TYPE_BLOCK:
    6346  return m_BlockAllocation.m_CanBecomeLost;
    6347  case ALLOCATION_TYPE_DEDICATED:
    6348  return false;
    6349  default:
    6350  VMA_ASSERT(0);
    6351  return false;
    6352  }
    6353 }
    6354 
// Returns the custom pool this allocation belongs to.
// Only valid for block allocations; may be VK_NULL_HANDLE for default pools -
// TODO confirm against callers.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6360 
// Tries to atomically mark this allocation as lost.
// Returns true on success; false if the allocation was used too recently
// (within frameInUseCount frames of currentFrameIndex) and must be kept alive.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Potentially still in use by in-flight frames - cannot be lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: presumably localLastUseFrameIndex was refreshed with the
            // concurrent value and the loop retries - confirm in CompareExchangeLastUseFrameIndex.
        }
    }
}
    6392 
    6393 #if VMA_STATS_STRING_ENABLED
    6394 
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value when printing JSON stats.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6404 
// Writes this allocation's properties as key/value pairs into a JSON object
// that the caller has already opened.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer: print its address as the string content.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Buffer/image usage flags are only recorded for buffer/image allocations.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6440 
    6441 #endif
    6442 
    6443 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6444 {
    6445  VMA_ASSERT(IsUserDataString());
    6446  if(m_pUserData != VMA_NULL)
    6447  {
    6448  char* const oldStr = (char*)m_pUserData;
    6449  const size_t oldStrLen = strlen(oldStr);
    6450  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6451  m_pUserData = VMA_NULL;
    6452  }
    6453 }
    6454 
    6455 void VmaAllocation_T::BlockAllocMap()
    6456 {
    6457  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6458 
    6459  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6460  {
    6461  ++m_MapCount;
    6462  }
    6463  else
    6464  {
    6465  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6466  }
    6467 }
    6468 
    6469 void VmaAllocation_T::BlockAllocUnmap()
    6470 {
    6471  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6472 
    6473  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6474  {
    6475  --m_MapCount;
    6476  }
    6477  else
    6478  {
    6479  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6480  }
    6481 }
    6482 
    6483 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6484 {
    6485  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6486 
    6487  if(m_MapCount != 0)
    6488  {
    6489  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6490  {
    6491  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6492  *ppData = m_DedicatedAllocation.m_pMappedData;
    6493  ++m_MapCount;
    6494  return VK_SUCCESS;
    6495  }
    6496  else
    6497  {
    6498  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6499  return VK_ERROR_MEMORY_MAP_FAILED;
    6500  }
    6501  }
    6502  else
    6503  {
    6504  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6505  hAllocator->m_hDevice,
    6506  m_DedicatedAllocation.m_hMemory,
    6507  0, // offset
    6508  VK_WHOLE_SIZE,
    6509  0, // flags
    6510  ppData);
    6511  if(result == VK_SUCCESS)
    6512  {
    6513  m_DedicatedAllocation.m_pMappedData = *ppData;
    6514  m_MapCount = 1;
    6515  }
    6516  return result;
    6517  }
    6518 }
    6519 
    6520 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6521 {
    6522  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6523 
    6524  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6525  {
    6526  --m_MapCount;
    6527  if(m_MapCount == 0)
    6528  {
    6529  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6530  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6531  hAllocator->m_hDevice,
    6532  m_DedicatedAllocation.m_hMemory);
    6533  }
    6534  }
    6535  else
    6536  {
    6537  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6538  }
    6539 }
    6540 
    6541 #if VMA_STATS_STRING_ENABLED
    6542 
// Serializes one VmaStatInfo as a JSON object.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    // Min/Avg/Max are printed only when there is more than one allocation -
    // with a single one all three would equal its size.
    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    // Same rule for unused-range size statistics.
    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6590 
    6591 #endif // #if VMA_STATS_STRING_ENABLED
    6592 
// Strict-weak ordering of suballocation-list iterators by the size of the
// pointed-to suballocation. The second overload allows comparing directly
// against a raw size (e.g. for binary search by requested size).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6608 
    6609 
    6611 // class VmaBlockMetadata
    6612 
// Constructs empty metadata (size 0); the actual block size is presumably set
// later by an Init-style call - not visible here, TODO confirm.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6618 
    6619 #if VMA_STATS_STRING_ENABLED
    6620 
// Writes the common JSON header for a block's detailed map: total/unused byte
// counts and allocation/unused-range counts, then opens the "Suballocations"
// array that PrintDetailedMap_Allocation / PrintDetailedMap_UnusedRange
// entries are appended into. Must be paired with PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    // size_t counts are widened to uint64_t for the numeric writer.
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6643 
// Writes one used suballocation as a single-line JSON object: its offset plus
// whatever key/value pairs the allocation itself emits via PrintParameters().
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // The allocation appends its own fields to the object opened above.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6657 
// Writes one free range as a single-line JSON object: offset, the FREE type
// name, and size.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6675 
// Closes the "Suballocations" array and the enclosing object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6681 
    6682 #endif // #if VMA_STATS_STRING_ENABLED
    6683 
    6685 // class VmaBlockMetadata_Generic
    6686 
// Constructs empty generic metadata; Init() must be called before use.
// Both internal containers allocate through the allocator's CPU callbacks.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6695 
// Trivial destructor: the member containers release their storage themselves.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6699 
    6700 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6701 {
    6702  VmaBlockMetadata::Init(size);
    6703 
    6704  m_FreeCount = 1;
    6705  m_SumFreeSize = size;
    6706 
    6707  VmaSuballocation suballoc = {};
    6708  suballoc.offset = 0;
    6709  suballoc.size = size;
    6710  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6711  suballoc.hAllocation = VK_NULL_HANDLE;
    6712 
    6713  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6714  m_Suballocations.push_back(suballoc);
    6715  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6716  --suballocItem;
    6717  m_FreeSuballocationsBySize.push_back(suballocItem);
    6718 }
    6719 
// Full consistency check of the block: suballocations must be contiguous,
// adjacent free ranges must be merged, used entries must agree with their
// allocation handles, and the cached totals (m_FreeCount, m_SumFreeSize) plus
// the sorted registry m_FreeSuballocationsBySize must match a full traversal.
// Returns true on success; VMA_VALIDATE fails out on the first violation.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A suballocation is free exactly when it carries no allocation handle.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only sufficiently large free ranges are kept in the registry.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // Used suballocation must agree with its allocation handle.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6801 
    6802 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6803 {
    6804  if(!m_FreeSuballocationsBySize.empty())
    6805  {
    6806  return m_FreeSuballocationsBySize.back()->size;
    6807  }
    6808  else
    6809  {
    6810  return 0;
    6811  }
    6812 }
    6813 
    6814 bool VmaBlockMetadata_Generic::IsEmpty() const
    6815 {
    6816  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6817 }
    6818 
    6819 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6820 {
    6821  outInfo.blockCount = 1;
    6822 
    6823  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6824  outInfo.allocationCount = rangeCount - m_FreeCount;
    6825  outInfo.unusedRangeCount = m_FreeCount;
    6826 
    6827  outInfo.unusedBytes = m_SumFreeSize;
    6828  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6829 
    6830  outInfo.allocationSizeMin = UINT64_MAX;
    6831  outInfo.allocationSizeMax = 0;
    6832  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6833  outInfo.unusedRangeSizeMax = 0;
    6834 
    6835  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6836  suballocItem != m_Suballocations.cend();
    6837  ++suballocItem)
    6838  {
    6839  const VmaSuballocation& suballoc = *suballocItem;
    6840  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6841  {
    6842  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6843  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6844  }
    6845  else
    6846  {
    6847  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6848  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6849  }
    6850  }
    6851 }
    6852 
    6853 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6854 {
    6855  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6856 
    6857  inoutStats.size += GetSize();
    6858  inoutStats.unusedSize += m_SumFreeSize;
    6859  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6860  inoutStats.unusedRangeCount += m_FreeCount;
    6861  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6862 }
    6863 
    6864 #if VMA_STATS_STRING_ENABLED
    6865 
    6866 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6867 {
    6868  PrintDetailedMap_Begin(json,
    6869  m_SumFreeSize, // unusedBytes
    6870  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6871  m_FreeCount); // unusedRangeCount
    6872 
    6873  size_t i = 0;
    6874  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6875  suballocItem != m_Suballocations.cend();
    6876  ++suballocItem, ++i)
    6877  {
    6878  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6879  {
    6880  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6881  }
    6882  else
    6883  {
    6884  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6885  }
    6886  }
    6887 
    6888  PrintDetailedMap_End(json);
    6889 }
    6890 
    6891 #endif // #if VMA_STATS_STRING_ENABLED
    6892 
    6893 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6894  uint32_t currentFrameIndex,
    6895  uint32_t frameInUseCount,
    6896  VkDeviceSize bufferImageGranularity,
    6897  VkDeviceSize allocSize,
    6898  VkDeviceSize allocAlignment,
    6899  bool upperAddress,
    6900  VmaSuballocationType allocType,
    6901  bool canMakeOtherLost,
    6902  uint32_t strategy,
    6903  VmaAllocationRequest* pAllocationRequest)
    6904 {
    6905  VMA_ASSERT(allocSize > 0);
    6906  VMA_ASSERT(!upperAddress);
    6907  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6908  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6909  VMA_HEAVY_ASSERT(Validate());
    6910 
    6911  // There is not enough total free space in this block to fullfill the request: Early return.
    6912  if(canMakeOtherLost == false &&
    6913  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6914  {
    6915  return false;
    6916  }
    6917 
    6918  // New algorithm, efficiently searching freeSuballocationsBySize.
    6919  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6920  if(freeSuballocCount > 0)
    6921  {
    6923  {
    6924  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6925  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6926  m_FreeSuballocationsBySize.data(),
    6927  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6928  allocSize + 2 * VMA_DEBUG_MARGIN,
    6929  VmaSuballocationItemSizeLess());
    6930  size_t index = it - m_FreeSuballocationsBySize.data();
    6931  for(; index < freeSuballocCount; ++index)
    6932  {
    6933  if(CheckAllocation(
    6934  currentFrameIndex,
    6935  frameInUseCount,
    6936  bufferImageGranularity,
    6937  allocSize,
    6938  allocAlignment,
    6939  allocType,
    6940  m_FreeSuballocationsBySize[index],
    6941  false, // canMakeOtherLost
    6942  &pAllocationRequest->offset,
    6943  &pAllocationRequest->itemsToMakeLostCount,
    6944  &pAllocationRequest->sumFreeSize,
    6945  &pAllocationRequest->sumItemSize))
    6946  {
    6947  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6948  return true;
    6949  }
    6950  }
    6951  }
    6952  else // WORST_FIT, FIRST_FIT
    6953  {
    6954  // Search staring from biggest suballocations.
    6955  for(size_t index = freeSuballocCount; index--; )
    6956  {
    6957  if(CheckAllocation(
    6958  currentFrameIndex,
    6959  frameInUseCount,
    6960  bufferImageGranularity,
    6961  allocSize,
    6962  allocAlignment,
    6963  allocType,
    6964  m_FreeSuballocationsBySize[index],
    6965  false, // canMakeOtherLost
    6966  &pAllocationRequest->offset,
    6967  &pAllocationRequest->itemsToMakeLostCount,
    6968  &pAllocationRequest->sumFreeSize,
    6969  &pAllocationRequest->sumItemSize))
    6970  {
    6971  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6972  return true;
    6973  }
    6974  }
    6975  }
    6976  }
    6977 
    6978  if(canMakeOtherLost)
    6979  {
    6980  // Brute-force algorithm. TODO: Come up with something better.
    6981 
    6982  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6983  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6984 
    6985  VmaAllocationRequest tmpAllocRequest = {};
    6986  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    6987  suballocIt != m_Suballocations.end();
    6988  ++suballocIt)
    6989  {
    6990  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    6991  suballocIt->hAllocation->CanBecomeLost())
    6992  {
    6993  if(CheckAllocation(
    6994  currentFrameIndex,
    6995  frameInUseCount,
    6996  bufferImageGranularity,
    6997  allocSize,
    6998  allocAlignment,
    6999  allocType,
    7000  suballocIt,
    7001  canMakeOtherLost,
    7002  &tmpAllocRequest.offset,
    7003  &tmpAllocRequest.itemsToMakeLostCount,
    7004  &tmpAllocRequest.sumFreeSize,
    7005  &tmpAllocRequest.sumItemSize))
    7006  {
    7007  tmpAllocRequest.item = suballocIt;
    7008 
    7009  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7011  {
    7012  *pAllocationRequest = tmpAllocRequest;
    7013  }
    7014  }
    7015  }
    7016  }
    7017 
    7018  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7019  {
    7020  return true;
    7021  }
    7022  }
    7023 
    7024  return false;
    7025 }
    7026 
// Makes lost the pAllocationRequest->itemsToMakeLostCount allocations counted
// by CheckAllocation, walking forward from pAllocationRequest->item. Returns
// false if any allocation refuses MakeLost(). On success the request's item
// points at a free suballocation ready for Alloc().
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free suballocation to reach the next used one.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge neighbors; reseat the iterator on the
            // resulting free item.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7058 
// Makes lost every allocation in this block that can become lost and whose
// MakeLost(currentFrameIndex, frameInUseCount) succeeds. Returns the number
// of allocations that were made lost.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; reseat `it` on the
            // returned free item so the loop's ++it continues past it.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    7076 
    7077 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7078 {
    7079  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7080  it != m_Suballocations.end();
    7081  ++it)
    7082  {
    7083  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7084  {
    7085  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7086  {
    7087  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7088  return VK_ERROR_VALIDATION_FAILED_EXT;
    7089  }
    7090  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7091  {
    7092  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7093  return VK_ERROR_VALIDATION_FAILED_EXT;
    7094  }
    7095  }
    7096  }
    7097 
    7098  return VK_SUCCESS;
    7099 }
    7100 
// Commits a previously computed allocation request: converts the free
// suballocation request.item into a used one of allocSize bytes at
// request.offset, splits off leftover space before/after it into new free
// suballocations, and updates the cached totals.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Upper-address placement is not supported by this metadata type.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // One free suballocation was consumed; each non-empty padding adds one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7166 
    7167 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7168 {
    7169  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7170  suballocItem != m_Suballocations.end();
    7171  ++suballocItem)
    7172  {
    7173  VmaSuballocation& suballoc = *suballocItem;
    7174  if(suballoc.hAllocation == allocation)
    7175  {
    7176  FreeSuballocation(suballocItem);
    7177  VMA_HEAVY_ASSERT(Validate());
    7178  return;
    7179  }
    7180  }
    7181  VMA_ASSERT(0 && "Not found!");
    7182 }
    7183 
    7184 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7185 {
    7186  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7187  suballocItem != m_Suballocations.end();
    7188  ++suballocItem)
    7189  {
    7190  VmaSuballocation& suballoc = *suballocItem;
    7191  if(suballoc.offset == offset)
    7192  {
    7193  FreeSuballocation(suballocItem);
    7194  return;
    7195  }
    7196  }
    7197  VMA_ASSERT(0 && "Not found!");
    7198 }
    7199 
    7200 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7201 {
    7202  VkDeviceSize lastSize = 0;
    7203  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7204  {
    7205  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7206 
    7207  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7208  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7209  VMA_VALIDATE(it->size >= lastSize);
    7210  lastSize = it->size;
    7211  }
    7212  return true;
    7213 }
    7214 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at suballocItem. Returns true on success and fills:
//   *pOffset              - final offset after margin/alignment/granularity,
//   *itemsToMakeLostCount - number of used suballocations that would have to
//                           be made lost (0 unless canMakeOtherLost),
//   *pSumFreeSize / *pSumItemSize - free vs. to-be-lost byte totals, used by
//                           the caller to compare placement cost.
// With canMakeOtherLost, used suballocations may be counted as reclaimable if
// they CanBecomeLost() and their last-use frame is older than
// currentFrameIndex - frameInUseCount; otherwise suballocItem must be a
// single free suballocation large enough by itself.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting suballocation either contributes its size as free space
        // or must itself be reclaimable; otherwise this spot is unusable.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: the allocation must fit entirely inside this single
        // free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7488 
    7489 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7490 {
    7491  VMA_ASSERT(item != m_Suballocations.end());
    7492  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7493 
    7494  VmaSuballocationList::iterator nextItem = item;
    7495  ++nextItem;
    7496  VMA_ASSERT(nextItem != m_Suballocations.end());
    7497  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7498 
    7499  item->size += nextItem->size;
    7500  --m_FreeCount;
    7501  m_Suballocations.erase(nextItem);
    7502 }
    7503 
// Marks the suballocation at `suballocItem` as free, updates the free-count
// and free-size totals, and coalesces it with adjacent free neighbors.
// Returns an iterator to the resulting (possibly merged) free suballocation,
// already registered in m_FreeSuballocationsBySize.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // A neighbor must be unregistered from the size-sorted vector BEFORE its
    // size changes (or it is erased) by merging, otherwise the sorted lookup
    // in UnregisterFreeSuballocation would no longer find it.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        // Re-register under the new, larger size.
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7555 
    7556 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7557 {
    7558  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7559  VMA_ASSERT(item->size > 0);
    7560 
    7561  // You may want to enable this validation at the beginning or at the end of
    7562  // this function, depending on what do you want to check.
    7563  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7564 
    7565  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7566  {
    7567  if(m_FreeSuballocationsBySize.empty())
    7568  {
    7569  m_FreeSuballocationsBySize.push_back(item);
    7570  }
    7571  else
    7572  {
    7573  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7574  }
    7575  }
    7576 
    7577  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7578 }
    7579 
    7580 
    7581 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    7582 {
    7583  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7584  VMA_ASSERT(item->size > 0);
    7585 
    7586  // You may want to enable this validation at the beginning or at the end of
    7587  // this function, depending on what do you want to check.
    7588  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7589 
    7590  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7591  {
    7592  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7593  m_FreeSuballocationsBySize.data(),
    7594  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    7595  item,
    7596  VmaSuballocationItemSizeLess());
    7597  for(size_t index = it - m_FreeSuballocationsBySize.data();
    7598  index < m_FreeSuballocationsBySize.size();
    7599  ++index)
    7600  {
    7601  if(m_FreeSuballocationsBySize[index] == item)
    7602  {
    7603  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    7604  return;
    7605  }
    7606  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    7607  }
    7608  VMA_ASSERT(0 && "Not found.");
    7609  }
    7610 
    7611  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7612 }
    7613 
    7615 // class VmaBlockMetadata_Linear
    7616 
// Constructs linear metadata with both suballocation vectors empty and the
// 2nd vector unused. Vectors allocate through the allocator's CPU allocation
// callbacks.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0), // m_Suballocations0 initially plays the role of the "1st" vector
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7629 
// Trivial destructor: suballocation vectors release their storage themselves.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7633 
// Initializes metadata for a memory block of the given size.
// The entire block starts out free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7639 
// Checks internal consistency of the linear metadata: vector-mode invariants,
// null-item bookkeeping, monotonically increasing offsets (with debug margin),
// agreement between stored suballocations and their VmaAllocation objects, and
// that m_SumFreeSize equals block size minus the sum of live allocation sizes.
// Each VMA_VALIDATE returns false from this function on the first failure.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is empty exactly when its mode says SECOND_VECTOR_EMPTY, and
    // a ring buffer with items in 2nd implies a non-empty 1st vector.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Minimum offset the next suballocation must respect (debug margin included).
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lowest offsets, before
    // the 1st vector, so it is walked first with the shared running `offset`.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // Free items must have a null handle, and vice versa.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // The allocation object must agree with our stored offset/size.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading null items of the 1st vector must all be free with null handles.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): `i >= m_1stNullItemsBeginCount` is always true given
        // the loop's start index, so this check is currently a tautology --
        // confirm whether a different condition was intended.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector occupies the highest offsets; it is
    // iterated in reverse so offsets keep increasing along the walk.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7766 
    7767 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7768 {
    7769  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7770  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7771 }
    7772 
    7773 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7774 {
    7775  const VkDeviceSize size = GetSize();
    7776 
    7777  /*
    7778  We don't consider gaps inside allocation vectors with freed allocations because
    7779  they are not suitable for reuse in linear allocator. We consider only space that
    7780  is available for new allocations.
    7781  */
    7782  if(IsEmpty())
    7783  {
    7784  return size;
    7785  }
    7786 
    7787  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7788 
    7789  switch(m_2ndVectorMode)
    7790  {
    7791  case SECOND_VECTOR_EMPTY:
    7792  /*
    7793  Available space is after end of 1st, as well as before beginning of 1st (which
    7794  whould make it a ring buffer).
    7795  */
    7796  {
    7797  const size_t suballocations1stCount = suballocations1st.size();
    7798  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    7799  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    7800  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    7801  return VMA_MAX(
    7802  firstSuballoc.offset,
    7803  size - (lastSuballoc.offset + lastSuballoc.size));
    7804  }
    7805  break;
    7806 
    7807  case SECOND_VECTOR_RING_BUFFER:
    7808  /*
    7809  Available space is only between end of 2nd and beginning of 1st.
    7810  */
    7811  {
    7812  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7813  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    7814  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    7815  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    7816  }
    7817  break;
    7818 
    7819  case SECOND_VECTOR_DOUBLE_STACK:
    7820  /*
    7821  Available space is only between end of 1st and top of 2nd.
    7822  */
    7823  {
    7824  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7825  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    7826  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    7827  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    7828  }
    7829  break;
    7830 
    7831  default:
    7832  VMA_ASSERT(0);
    7833  return 0;
    7834  }
    7835 }
    7836 
    7837 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7838 {
    7839  const VkDeviceSize size = GetSize();
    7840  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7841  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7842  const size_t suballoc1stCount = suballocations1st.size();
    7843  const size_t suballoc2ndCount = suballocations2nd.size();
    7844 
    7845  outInfo.blockCount = 1;
    7846  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7847  outInfo.unusedRangeCount = 0;
    7848  outInfo.usedBytes = 0;
    7849  outInfo.allocationSizeMin = UINT64_MAX;
    7850  outInfo.allocationSizeMax = 0;
    7851  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7852  outInfo.unusedRangeSizeMax = 0;
    7853 
    7854  VkDeviceSize lastOffset = 0;
    7855 
    7856  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7857  {
    7858  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7859  size_t nextAlloc2ndIndex = 0;
    7860  while(lastOffset < freeSpace2ndTo1stEnd)
    7861  {
    7862  // Find next non-null allocation or move nextAllocIndex to the end.
    7863  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7864  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7865  {
    7866  ++nextAlloc2ndIndex;
    7867  }
    7868 
    7869  // Found non-null allocation.
    7870  if(nextAlloc2ndIndex < suballoc2ndCount)
    7871  {
    7872  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7873 
    7874  // 1. Process free space before this allocation.
    7875  if(lastOffset < suballoc.offset)
    7876  {
    7877  // There is free space from lastOffset to suballoc.offset.
    7878  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7879  ++outInfo.unusedRangeCount;
    7880  outInfo.unusedBytes += unusedRangeSize;
    7881  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7882  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7883  }
    7884 
    7885  // 2. Process this allocation.
    7886  // There is allocation with suballoc.offset, suballoc.size.
    7887  outInfo.usedBytes += suballoc.size;
    7888  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7889  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7890 
    7891  // 3. Prepare for next iteration.
    7892  lastOffset = suballoc.offset + suballoc.size;
    7893  ++nextAlloc2ndIndex;
    7894  }
    7895  // We are at the end.
    7896  else
    7897  {
    7898  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7899  if(lastOffset < freeSpace2ndTo1stEnd)
    7900  {
    7901  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7902  ++outInfo.unusedRangeCount;
    7903  outInfo.unusedBytes += unusedRangeSize;
    7904  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7905  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7906  }
    7907 
    7908  // End of loop.
    7909  lastOffset = freeSpace2ndTo1stEnd;
    7910  }
    7911  }
    7912  }
    7913 
    7914  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7915  const VkDeviceSize freeSpace1stTo2ndEnd =
    7916  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7917  while(lastOffset < freeSpace1stTo2ndEnd)
    7918  {
    7919  // Find next non-null allocation or move nextAllocIndex to the end.
    7920  while(nextAlloc1stIndex < suballoc1stCount &&
    7921  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7922  {
    7923  ++nextAlloc1stIndex;
    7924  }
    7925 
    7926  // Found non-null allocation.
    7927  if(nextAlloc1stIndex < suballoc1stCount)
    7928  {
    7929  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7930 
    7931  // 1. Process free space before this allocation.
    7932  if(lastOffset < suballoc.offset)
    7933  {
    7934  // There is free space from lastOffset to suballoc.offset.
    7935  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7936  ++outInfo.unusedRangeCount;
    7937  outInfo.unusedBytes += unusedRangeSize;
    7938  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7939  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7940  }
    7941 
    7942  // 2. Process this allocation.
    7943  // There is allocation with suballoc.offset, suballoc.size.
    7944  outInfo.usedBytes += suballoc.size;
    7945  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7946  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7947 
    7948  // 3. Prepare for next iteration.
    7949  lastOffset = suballoc.offset + suballoc.size;
    7950  ++nextAlloc1stIndex;
    7951  }
    7952  // We are at the end.
    7953  else
    7954  {
    7955  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7956  if(lastOffset < freeSpace1stTo2ndEnd)
    7957  {
    7958  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7959  ++outInfo.unusedRangeCount;
    7960  outInfo.unusedBytes += unusedRangeSize;
    7961  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7962  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7963  }
    7964 
    7965  // End of loop.
    7966  lastOffset = freeSpace1stTo2ndEnd;
    7967  }
    7968  }
    7969 
    7970  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7971  {
    7972  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7973  while(lastOffset < size)
    7974  {
    7975  // Find next non-null allocation or move nextAllocIndex to the end.
    7976  while(nextAlloc2ndIndex != SIZE_MAX &&
    7977  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7978  {
    7979  --nextAlloc2ndIndex;
    7980  }
    7981 
    7982  // Found non-null allocation.
    7983  if(nextAlloc2ndIndex != SIZE_MAX)
    7984  {
    7985  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7986 
    7987  // 1. Process free space before this allocation.
    7988  if(lastOffset < suballoc.offset)
    7989  {
    7990  // There is free space from lastOffset to suballoc.offset.
    7991  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7992  ++outInfo.unusedRangeCount;
    7993  outInfo.unusedBytes += unusedRangeSize;
    7994  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7995  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7996  }
    7997 
    7998  // 2. Process this allocation.
    7999  // There is allocation with suballoc.offset, suballoc.size.
    8000  outInfo.usedBytes += suballoc.size;
    8001  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8002  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8003 
    8004  // 3. Prepare for next iteration.
    8005  lastOffset = suballoc.offset + suballoc.size;
    8006  --nextAlloc2ndIndex;
    8007  }
    8008  // We are at the end.
    8009  else
    8010  {
    8011  // There is free space from lastOffset to size.
    8012  if(lastOffset < size)
    8013  {
    8014  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8015  ++outInfo.unusedRangeCount;
    8016  outInfo.unusedBytes += unusedRangeSize;
    8017  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8018  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8019  }
    8020 
    8021  // End of loop.
    8022  lastOffset = size;
    8023  }
    8024  }
    8025  }
    8026 
    8027  outInfo.unusedBytes = size - outInfo.usedBytes;
    8028 }
    8029 
    8030 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8031 {
    8032  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8033  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8034  const VkDeviceSize size = GetSize();
    8035  const size_t suballoc1stCount = suballocations1st.size();
    8036  const size_t suballoc2ndCount = suballocations2nd.size();
    8037 
    8038  inoutStats.size += size;
    8039 
    8040  VkDeviceSize lastOffset = 0;
    8041 
    8042  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8043  {
    8044  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8045  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8046  while(lastOffset < freeSpace2ndTo1stEnd)
    8047  {
    8048  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8049  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8050  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8051  {
    8052  ++nextAlloc2ndIndex;
    8053  }
    8054 
    8055  // Found non-null allocation.
    8056  if(nextAlloc2ndIndex < suballoc2ndCount)
    8057  {
    8058  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8059 
    8060  // 1. Process free space before this allocation.
    8061  if(lastOffset < suballoc.offset)
    8062  {
    8063  // There is free space from lastOffset to suballoc.offset.
    8064  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8065  inoutStats.unusedSize += unusedRangeSize;
    8066  ++inoutStats.unusedRangeCount;
    8067  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8068  }
    8069 
    8070  // 2. Process this allocation.
    8071  // There is allocation with suballoc.offset, suballoc.size.
    8072  ++inoutStats.allocationCount;
    8073 
    8074  // 3. Prepare for next iteration.
    8075  lastOffset = suballoc.offset + suballoc.size;
    8076  ++nextAlloc2ndIndex;
    8077  }
    8078  // We are at the end.
    8079  else
    8080  {
    8081  if(lastOffset < freeSpace2ndTo1stEnd)
    8082  {
    8083  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8084  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8085  inoutStats.unusedSize += unusedRangeSize;
    8086  ++inoutStats.unusedRangeCount;
    8087  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8088  }
    8089 
    8090  // End of loop.
    8091  lastOffset = freeSpace2ndTo1stEnd;
    8092  }
    8093  }
    8094  }
    8095 
    8096  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8097  const VkDeviceSize freeSpace1stTo2ndEnd =
    8098  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8099  while(lastOffset < freeSpace1stTo2ndEnd)
    8100  {
    8101  // Find next non-null allocation or move nextAllocIndex to the end.
    8102  while(nextAlloc1stIndex < suballoc1stCount &&
    8103  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8104  {
    8105  ++nextAlloc1stIndex;
    8106  }
    8107 
    8108  // Found non-null allocation.
    8109  if(nextAlloc1stIndex < suballoc1stCount)
    8110  {
    8111  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8112 
    8113  // 1. Process free space before this allocation.
    8114  if(lastOffset < suballoc.offset)
    8115  {
    8116  // There is free space from lastOffset to suballoc.offset.
    8117  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8118  inoutStats.unusedSize += unusedRangeSize;
    8119  ++inoutStats.unusedRangeCount;
    8120  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8121  }
    8122 
    8123  // 2. Process this allocation.
    8124  // There is allocation with suballoc.offset, suballoc.size.
    8125  ++inoutStats.allocationCount;
    8126 
    8127  // 3. Prepare for next iteration.
    8128  lastOffset = suballoc.offset + suballoc.size;
    8129  ++nextAlloc1stIndex;
    8130  }
    8131  // We are at the end.
    8132  else
    8133  {
    8134  if(lastOffset < freeSpace1stTo2ndEnd)
    8135  {
    8136  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8137  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8138  inoutStats.unusedSize += unusedRangeSize;
    8139  ++inoutStats.unusedRangeCount;
    8140  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8141  }
    8142 
    8143  // End of loop.
    8144  lastOffset = freeSpace1stTo2ndEnd;
    8145  }
    8146  }
    8147 
    8148  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8149  {
    8150  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8151  while(lastOffset < size)
    8152  {
    8153  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8154  while(nextAlloc2ndIndex != SIZE_MAX &&
    8155  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8156  {
    8157  --nextAlloc2ndIndex;
    8158  }
    8159 
    8160  // Found non-null allocation.
    8161  if(nextAlloc2ndIndex != SIZE_MAX)
    8162  {
    8163  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8164 
    8165  // 1. Process free space before this allocation.
    8166  if(lastOffset < suballoc.offset)
    8167  {
    8168  // There is free space from lastOffset to suballoc.offset.
    8169  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8170  inoutStats.unusedSize += unusedRangeSize;
    8171  ++inoutStats.unusedRangeCount;
    8172  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8173  }
    8174 
    8175  // 2. Process this allocation.
    8176  // There is allocation with suballoc.offset, suballoc.size.
    8177  ++inoutStats.allocationCount;
    8178 
    8179  // 3. Prepare for next iteration.
    8180  lastOffset = suballoc.offset + suballoc.size;
    8181  --nextAlloc2ndIndex;
    8182  }
    8183  // We are at the end.
    8184  else
    8185  {
    8186  if(lastOffset < size)
    8187  {
    8188  // There is free space from lastOffset to size.
    8189  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8190  inoutStats.unusedSize += unusedRangeSize;
    8191  ++inoutStats.unusedRangeCount;
    8192  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8193  }
    8194 
    8195  // End of loop.
    8196  lastOffset = size;
    8197  }
    8198  }
    8199  }
    8200 }
    8201 
    8202 #if VMA_STATS_STRING_ENABLED
    8203 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8204 {
    8205  const VkDeviceSize size = GetSize();
    8206  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8207  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8208  const size_t suballoc1stCount = suballocations1st.size();
    8209  const size_t suballoc2ndCount = suballocations2nd.size();
    8210 
    8211  // FIRST PASS
    8212 
    8213  size_t unusedRangeCount = 0;
    8214  VkDeviceSize usedBytes = 0;
    8215 
    8216  VkDeviceSize lastOffset = 0;
    8217 
    8218  size_t alloc2ndCount = 0;
    8219  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8220  {
    8221  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8222  size_t nextAlloc2ndIndex = 0;
    8223  while(lastOffset < freeSpace2ndTo1stEnd)
    8224  {
    8225  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8226  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8227  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8228  {
    8229  ++nextAlloc2ndIndex;
    8230  }
    8231 
    8232  // Found non-null allocation.
    8233  if(nextAlloc2ndIndex < suballoc2ndCount)
    8234  {
    8235  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8236 
    8237  // 1. Process free space before this allocation.
    8238  if(lastOffset < suballoc.offset)
    8239  {
    8240  // There is free space from lastOffset to suballoc.offset.
    8241  ++unusedRangeCount;
    8242  }
    8243 
    8244  // 2. Process this allocation.
    8245  // There is allocation with suballoc.offset, suballoc.size.
    8246  ++alloc2ndCount;
    8247  usedBytes += suballoc.size;
    8248 
    8249  // 3. Prepare for next iteration.
    8250  lastOffset = suballoc.offset + suballoc.size;
    8251  ++nextAlloc2ndIndex;
    8252  }
    8253  // We are at the end.
    8254  else
    8255  {
    8256  if(lastOffset < freeSpace2ndTo1stEnd)
    8257  {
    8258  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8259  ++unusedRangeCount;
    8260  }
    8261 
    8262  // End of loop.
    8263  lastOffset = freeSpace2ndTo1stEnd;
    8264  }
    8265  }
    8266  }
    8267 
    8268  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8269  size_t alloc1stCount = 0;
    8270  const VkDeviceSize freeSpace1stTo2ndEnd =
    8271  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8272  while(lastOffset < freeSpace1stTo2ndEnd)
    8273  {
    8274  // Find next non-null allocation or move nextAllocIndex to the end.
    8275  while(nextAlloc1stIndex < suballoc1stCount &&
    8276  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8277  {
    8278  ++nextAlloc1stIndex;
    8279  }
    8280 
    8281  // Found non-null allocation.
    8282  if(nextAlloc1stIndex < suballoc1stCount)
    8283  {
    8284  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8285 
    8286  // 1. Process free space before this allocation.
    8287  if(lastOffset < suballoc.offset)
    8288  {
    8289  // There is free space from lastOffset to suballoc.offset.
    8290  ++unusedRangeCount;
    8291  }
    8292 
    8293  // 2. Process this allocation.
    8294  // There is allocation with suballoc.offset, suballoc.size.
    8295  ++alloc1stCount;
    8296  usedBytes += suballoc.size;
    8297 
    8298  // 3. Prepare for next iteration.
    8299  lastOffset = suballoc.offset + suballoc.size;
    8300  ++nextAlloc1stIndex;
    8301  }
    8302  // We are at the end.
    8303  else
    8304  {
    8305  if(lastOffset < size)
    8306  {
    8307  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8308  ++unusedRangeCount;
    8309  }
    8310 
    8311  // End of loop.
    8312  lastOffset = freeSpace1stTo2ndEnd;
    8313  }
    8314  }
    8315 
    8316  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8317  {
    8318  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8319  while(lastOffset < size)
    8320  {
    8321  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8322  while(nextAlloc2ndIndex != SIZE_MAX &&
    8323  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8324  {
    8325  --nextAlloc2ndIndex;
    8326  }
    8327 
    8328  // Found non-null allocation.
    8329  if(nextAlloc2ndIndex != SIZE_MAX)
    8330  {
    8331  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8332 
    8333  // 1. Process free space before this allocation.
    8334  if(lastOffset < suballoc.offset)
    8335  {
    8336  // There is free space from lastOffset to suballoc.offset.
    8337  ++unusedRangeCount;
    8338  }
    8339 
    8340  // 2. Process this allocation.
    8341  // There is allocation with suballoc.offset, suballoc.size.
    8342  ++alloc2ndCount;
    8343  usedBytes += suballoc.size;
    8344 
    8345  // 3. Prepare for next iteration.
    8346  lastOffset = suballoc.offset + suballoc.size;
    8347  --nextAlloc2ndIndex;
    8348  }
    8349  // We are at the end.
    8350  else
    8351  {
    8352  if(lastOffset < size)
    8353  {
    8354  // There is free space from lastOffset to size.
    8355  ++unusedRangeCount;
    8356  }
    8357 
    8358  // End of loop.
    8359  lastOffset = size;
    8360  }
    8361  }
    8362  }
    8363 
    8364  const VkDeviceSize unusedBytes = size - usedBytes;
    8365  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8366 
    8367  // SECOND PASS
    8368  lastOffset = 0;
    8369 
    8370  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8371  {
    8372  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8373  size_t nextAlloc2ndIndex = 0;
    8374  while(lastOffset < freeSpace2ndTo1stEnd)
    8375  {
    8376  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8377  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8378  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8379  {
    8380  ++nextAlloc2ndIndex;
    8381  }
    8382 
    8383  // Found non-null allocation.
    8384  if(nextAlloc2ndIndex < suballoc2ndCount)
    8385  {
    8386  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8387 
    8388  // 1. Process free space before this allocation.
    8389  if(lastOffset < suballoc.offset)
    8390  {
    8391  // There is free space from lastOffset to suballoc.offset.
    8392  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8393  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8394  }
    8395 
    8396  // 2. Process this allocation.
    8397  // There is allocation with suballoc.offset, suballoc.size.
    8398  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8399 
    8400  // 3. Prepare for next iteration.
    8401  lastOffset = suballoc.offset + suballoc.size;
    8402  ++nextAlloc2ndIndex;
    8403  }
    8404  // We are at the end.
    8405  else
    8406  {
    8407  if(lastOffset < freeSpace2ndTo1stEnd)
    8408  {
    8409  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8410  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8411  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8412  }
    8413 
    8414  // End of loop.
    8415  lastOffset = freeSpace2ndTo1stEnd;
    8416  }
    8417  }
    8418  }
    8419 
    8420  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8421  while(lastOffset < freeSpace1stTo2ndEnd)
    8422  {
    8423  // Find next non-null allocation or move nextAllocIndex to the end.
    8424  while(nextAlloc1stIndex < suballoc1stCount &&
    8425  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8426  {
    8427  ++nextAlloc1stIndex;
    8428  }
    8429 
    8430  // Found non-null allocation.
    8431  if(nextAlloc1stIndex < suballoc1stCount)
    8432  {
    8433  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8434 
    8435  // 1. Process free space before this allocation.
    8436  if(lastOffset < suballoc.offset)
    8437  {
    8438  // There is free space from lastOffset to suballoc.offset.
    8439  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8440  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8441  }
    8442 
    8443  // 2. Process this allocation.
    8444  // There is allocation with suballoc.offset, suballoc.size.
    8445  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8446 
    8447  // 3. Prepare for next iteration.
    8448  lastOffset = suballoc.offset + suballoc.size;
    8449  ++nextAlloc1stIndex;
    8450  }
    8451  // We are at the end.
    8452  else
    8453  {
    8454  if(lastOffset < freeSpace1stTo2ndEnd)
    8455  {
    8456  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8457  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8458  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8459  }
    8460 
    8461  // End of loop.
    8462  lastOffset = freeSpace1stTo2ndEnd;
    8463  }
    8464  }
    8465 
    8466  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8467  {
    8468  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8469  while(lastOffset < size)
    8470  {
    8471  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8472  while(nextAlloc2ndIndex != SIZE_MAX &&
    8473  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8474  {
    8475  --nextAlloc2ndIndex;
    8476  }
    8477 
    8478  // Found non-null allocation.
    8479  if(nextAlloc2ndIndex != SIZE_MAX)
    8480  {
    8481  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8482 
    8483  // 1. Process free space before this allocation.
    8484  if(lastOffset < suballoc.offset)
    8485  {
    8486  // There is free space from lastOffset to suballoc.offset.
    8487  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8488  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8489  }
    8490 
    8491  // 2. Process this allocation.
    8492  // There is allocation with suballoc.offset, suballoc.size.
    8493  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8494 
    8495  // 3. Prepare for next iteration.
    8496  lastOffset = suballoc.offset + suballoc.size;
    8497  --nextAlloc2ndIndex;
    8498  }
    8499  // We are at the end.
    8500  else
    8501  {
    8502  if(lastOffset < size)
    8503  {
    8504  // There is free space from lastOffset to size.
    8505  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8506  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8507  }
    8508 
    8509  // End of loop.
    8510  lastOffset = size;
    8511  }
    8512  }
    8513  }
    8514 
    8515  PrintDetailedMap_End(json);
    8516 }
    8517 #endif // #if VMA_STATS_STRING_ENABLED
    8518 
/*
Searches for a place for a new suballocation inside this linear block.

The linear metadata keeps two vectors of suballocations:
- 1st: the main, growing sequence of allocations,
- 2nd: used either as the "upper" side of a double stack (when upperAddress
  allocations are made) or as the second part of a 2-part ring buffer after
  the allocation pointer wrapped around past the end of the block.

On success fills *pAllocationRequest (offset, sumFreeSize, sumItemSize,
itemsToMakeLostCount) and returns true; returns false when the request cannot
be satisfied. `strategy` is accepted for interface compatibility but not used
by this algorithm. `canMakeOtherLost` enables evicting stale allocations at
the beginning of the 1st vector in ring-buffer mode.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Double-stack path: allocate downward from the top of the block
        // (below the last element of the 2nd vector, if any).
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            if(allocSize > lastSuballoc.offset)
            {
                // Not enough room below the current top-of-stack item.
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment. Growing downward, so align DOWN.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Widen the alignment to a whole granularity page to avoid the conflict.
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment. Growing upward, so align UP.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // In double-stack mode, free space ends where the 2nd (upper) stack begins.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Scan the head of the 1st vector for allocations colliding with the
                // proposed [resultOffset, resultOffset+allocSize) range and count how
                // many would have to be made lost (evicted).
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            // NOTE(review): the first branch uses strict `< size` while analogous
            // success checks elsewhere in this function use `<=` — possibly an
            // intentional safety margin at the very end of the block; confirm.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    8891 
    8892 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    8893  uint32_t currentFrameIndex,
    8894  uint32_t frameInUseCount,
    8895  VmaAllocationRequest* pAllocationRequest)
    8896 {
    8897  if(pAllocationRequest->itemsToMakeLostCount == 0)
    8898  {
    8899  return true;
    8900  }
    8901 
    8902  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    8903 
    8904  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8905  size_t index1st = m_1stNullItemsBeginCount;
    8906  size_t madeLostCount = 0;
    8907  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    8908  {
    8909  VMA_ASSERT(index1st < suballocations1st.size());
    8910  VmaSuballocation& suballoc = suballocations1st[index1st];
    8911  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8912  {
    8913  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8914  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    8915  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8916  {
    8917  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8918  suballoc.hAllocation = VK_NULL_HANDLE;
    8919  m_SumFreeSize += suballoc.size;
    8920  ++m_1stNullItemsMiddleCount;
    8921  ++madeLostCount;
    8922  }
    8923  else
    8924  {
    8925  return false;
    8926  }
    8927  }
    8928  ++index1st;
    8929  }
    8930 
    8931  CleanupAfterFree();
    8932  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    8933 
    8934  return true;
    8935 }
    8936 
    8937 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8938 {
    8939  uint32_t lostAllocationCount = 0;
    8940 
    8941  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8942  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8943  {
    8944  VmaSuballocation& suballoc = suballocations1st[i];
    8945  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8946  suballoc.hAllocation->CanBecomeLost() &&
    8947  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8948  {
    8949  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8950  suballoc.hAllocation = VK_NULL_HANDLE;
    8951  ++m_1stNullItemsMiddleCount;
    8952  m_SumFreeSize += suballoc.size;
    8953  ++lostAllocationCount;
    8954  }
    8955  }
    8956 
    8957  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8958  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8959  {
    8960  VmaSuballocation& suballoc = suballocations2nd[i];
    8961  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8962  suballoc.hAllocation->CanBecomeLost() &&
    8963  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8964  {
    8965  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8966  suballoc.hAllocation = VK_NULL_HANDLE;
    8967  ++m_2ndNullItemsCount;
    8968  ++lostAllocationCount;
    8969  }
    8970  }
    8971 
    8972  if(lostAllocationCount)
    8973  {
    8974  CleanupAfterFree();
    8975  }
    8976 
    8977  return lostAllocationCount;
    8978 }
    8979 
    8980 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8981 {
    8982  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8983  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8984  {
    8985  const VmaSuballocation& suballoc = suballocations1st[i];
    8986  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8987  {
    8988  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    8989  {
    8990  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8991  return VK_ERROR_VALIDATION_FAILED_EXT;
    8992  }
    8993  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    8994  {
    8995  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8996  return VK_ERROR_VALIDATION_FAILED_EXT;
    8997  }
    8998  }
    8999  }
    9000 
    9001  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9002  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9003  {
    9004  const VmaSuballocation& suballoc = suballocations2nd[i];
    9005  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9006  {
    9007  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9008  {
    9009  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9010  return VK_ERROR_VALIDATION_FAILED_EXT;
    9011  }
    9012  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9013  {
    9014  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9015  return VK_ERROR_VALIDATION_FAILED_EXT;
    9016  }
    9017  }
    9018  }
    9019 
    9020  return VK_SUCCESS;
    9021 }
    9022 
    9023 void VmaBlockMetadata_Linear::Alloc(
    9024  const VmaAllocationRequest& request,
    9025  VmaSuballocationType type,
    9026  VkDeviceSize allocSize,
    9027  bool upperAddress,
    9028  VmaAllocation hAllocation)
    9029 {
    9030  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    9031 
    9032  if(upperAddress)
    9033  {
    9034  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    9035  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    9036  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9037  suballocations2nd.push_back(newSuballoc);
    9038  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    9039  }
    9040  else
    9041  {
    9042  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9043 
    9044  // First allocation.
    9045  if(suballocations1st.empty())
    9046  {
    9047  suballocations1st.push_back(newSuballoc);
    9048  }
    9049  else
    9050  {
    9051  // New allocation at the end of 1st vector.
    9052  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
    9053  {
    9054  // Check if it fits before the end of the block.
    9055  VMA_ASSERT(request.offset + allocSize <= GetSize());
    9056  suballocations1st.push_back(newSuballoc);
    9057  }
    9058  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    9059  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
    9060  {
    9061  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9062 
    9063  switch(m_2ndVectorMode)
    9064  {
    9065  case SECOND_VECTOR_EMPTY:
    9066  // First allocation from second part ring buffer.
    9067  VMA_ASSERT(suballocations2nd.empty());
    9068  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    9069  break;
    9070  case SECOND_VECTOR_RING_BUFFER:
    9071  // 2-part ring buffer is already started.
    9072  VMA_ASSERT(!suballocations2nd.empty());
    9073  break;
    9074  case SECOND_VECTOR_DOUBLE_STACK:
    9075  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    9076  break;
    9077  default:
    9078  VMA_ASSERT(0);
    9079  }
    9080 
    9081  suballocations2nd.push_back(newSuballoc);
    9082  }
    9083  else
    9084  {
    9085  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    9086  }
    9087  }
    9088  }
    9089 
    9090  m_SumFreeSize -= newSuballoc.size;
    9091 }
    9092 
// Frees the given allocation from this block.
// Thin wrapper: the linear metadata locates suballocations purely by their
// offset, so this just forwards the allocation's offset to FreeAtOffset().
void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
{
    FreeAtOffset(allocation->GetOffset());
}
    9097 
// Releases the suballocation that starts at `offset`: marks it free (or pops
// it), updates m_SumFreeSize and the null-item counters, then compacts the
// internal vectors via CleanupAfterFree(). Cheap O(1) fast paths (oldest item
// of the 1st vector, last item of the active vector) are tried first; the
// general case falls back to binary search in the middle of each vector.
// Asserts if no suballocation with the given offset exists.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // NOTE(review): indexing with m_1stNullItemsBeginCount assumes at least
        // one live item follows the null prefix whenever the vector is
        // non-empty - presumably maintained by CleanupAfterFree(); confirm.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    // NOTE(review): relies on suballocations2nd being non-empty whenever
    // m_2ndVectorMode is RING_BUFFER or DOUBLE_STACK - an invariant of this
    // class; confirm against CleanupAfterFree().
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 1st vector is sorted by ascending offset, so binary search applies.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as a free middle item; actual removal is deferred to CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps 2nd sorted ascending; double stack keeps it descending,
        // hence the different comparator per mode.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9186 
    9187 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9188 {
    9189  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9190  const size_t suballocCount = AccessSuballocations1st().size();
    9191  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9192 }
    9193 
// Housekeeping run after every free in the linear allocator:
// - if the block became empty, resets all bookkeeping to the initial state;
// - otherwise trims null (freed) items from the edges of both suballocation
//   vectors, optionally compacts the 1st vector, and handles the cases where
//   one of the vectors became empty (including swapping the roles of the two
//   vectors in ring-buffer mode).
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations left - return to the pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact: shift every live item of the 1st vector toward the
            // front, skipping null items, then shrink the vector to fit.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Skip any leading null items of the (new) 1st vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9290 
    9291 
    9293 // class VmaBlockMetadata_Buddy
    9294 
    9295 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    9296  VmaBlockMetadata(hAllocator),
    9297  m_Root(VMA_NULL),
    9298  m_AllocationCount(0),
    9299  m_FreeCount(1),
    9300  m_SumFreeSize(0)
    9301 {
    9302  memset(m_FreeList, 0, sizeof(m_FreeList));
    9303 }
    9304 
// Frees the entire node tree (children before parents).
// NOTE(review): DeleteNode dereferences its argument, so this assumes Init()
// ran and m_Root is non-null - confirm a metadata object is never destroyed
// without initialization.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9309 
    9310 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9311 {
    9312  VmaBlockMetadata::Init(size);
    9313 
    9314  m_UsableSize = VmaPrevPow2(size);
    9315  m_SumFreeSize = m_UsableSize;
    9316 
    9317  // Calculate m_LevelCount.
    9318  m_LevelCount = 1;
    9319  while(m_LevelCount < MAX_LEVELS &&
    9320  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9321  {
    9322  ++m_LevelCount;
    9323  }
    9324 
    9325  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9326  rootNode->offset = 0;
    9327  rootNode->type = Node::TYPE_FREE;
    9328  rootNode->parent = VMA_NULL;
    9329  rootNode->buddy = VMA_NULL;
    9330 
    9331  m_Root = rootNode;
    9332  AddToFreeListFront(0, rootNode);
    9333 }
    9334 
// Checks consistency of the whole metadata: the node tree, the aggregate
// counters, and every per-level free list. Returns false (via VMA_VALIDATE)
// on the first violated invariant.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            // Doubly-linked list invariants: the back pointer must reference
            // the tail node; elsewhere prev/next links must be mutual.
            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9377 
    9378 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9379 {
    9380  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9381  {
    9382  if(m_FreeList[level].front != VMA_NULL)
    9383  {
    9384  return LevelToNodeSize(level);
    9385  }
    9386  }
    9387  return 0;
    9388 }
    9389 
    9390 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9391 {
    9392  const VkDeviceSize unusableSize = GetUnusableSize();
    9393 
    9394  outInfo.blockCount = 1;
    9395 
    9396  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9397  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9398 
    9399  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9400  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9401  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9402 
    9403  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9404 
    9405  if(unusableSize > 0)
    9406  {
    9407  ++outInfo.unusedRangeCount;
    9408  outInfo.unusedBytes += unusableSize;
    9409  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9410  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9411  }
    9412 }
    9413 
    9414 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9415 {
    9416  const VkDeviceSize unusableSize = GetUnusableSize();
    9417 
    9418  inoutStats.size += GetSize();
    9419  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9420  inoutStats.allocationCount += m_AllocationCount;
    9421  inoutStats.unusedRangeCount += m_FreeCount;
    9422  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9423 
    9424  if(unusableSize > 0)
    9425  {
    9426  ++inoutStats.unusedRangeCount;
    9427  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9428  }
    9429 }
    9430 
    9431 #if VMA_STATS_STRING_ENABLED
    9432 
    9433 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    9434 {
    9435  // TODO optimize
    9436  VmaStatInfo stat;
    9437  CalcAllocationStatInfo(stat);
    9438 
    9439  PrintDetailedMap_Begin(
    9440  json,
    9441  stat.unusedBytes,
    9442  stat.allocationCount,
    9443  stat.unusedRangeCount);
    9444 
    9445  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    9446 
    9447  const VkDeviceSize unusableSize = GetUnusableSize();
    9448  if(unusableSize > 0)
    9449  {
    9450  PrintDetailedMap_UnusedRange(json,
    9451  m_UsableSize, // offset
    9452  unusableSize); // size
    9453  }
    9454 
    9455  PrintDetailedMap_End(json);
    9456 }
    9457 
    9458 #endif // #if VMA_STATS_STRING_ENABLED
    9459 
    9460 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    9461  uint32_t currentFrameIndex,
    9462  uint32_t frameInUseCount,
    9463  VkDeviceSize bufferImageGranularity,
    9464  VkDeviceSize allocSize,
    9465  VkDeviceSize allocAlignment,
    9466  bool upperAddress,
    9467  VmaSuballocationType allocType,
    9468  bool canMakeOtherLost,
    9469  uint32_t strategy,
    9470  VmaAllocationRequest* pAllocationRequest)
    9471 {
    9472  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    9473 
    9474  // Simple way to respect bufferImageGranularity. May be optimized some day.
    9475  // Whenever it might be an OPTIMAL image...
    9476  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    9477  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    9478  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    9479  {
    9480  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    9481  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    9482  }
    9483 
    9484  if(allocSize > m_UsableSize)
    9485  {
    9486  return false;
    9487  }
    9488 
    9489  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    9490  for(uint32_t level = targetLevel + 1; level--; )
    9491  {
    9492  for(Node* freeNode = m_FreeList[level].front;
    9493  freeNode != VMA_NULL;
    9494  freeNode = freeNode->free.next)
    9495  {
    9496  if(freeNode->offset % allocAlignment == 0)
    9497  {
    9498  pAllocationRequest->offset = freeNode->offset;
    9499  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    9500  pAllocationRequest->sumItemSize = 0;
    9501  pAllocationRequest->itemsToMakeLostCount = 0;
    9502  pAllocationRequest->customData = (void*)(uintptr_t)level;
    9503  return true;
    9504  }
    9505  }
    9506  }
    9507 
    9508  return false;
    9509 }
    9510 
    9511 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9512  uint32_t currentFrameIndex,
    9513  uint32_t frameInUseCount,
    9514  VmaAllocationRequest* pAllocationRequest)
    9515 {
    9516  /*
    9517  Lost allocations are not supported in buddy allocator at the moment.
    9518  Support might be added in the future.
    9519  */
    9520  return pAllocationRequest->itemsToMakeLostCount == 0;
    9521 }
    9522 
// Returns the number of allocations made lost - always 0 here.
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    return 0;
}
    9531 
// Commits a previously computed allocation request: finds the chosen free
// node (the level was stashed in request.customData by
// CreateAllocationRequest), splits free nodes downward until the exact-fit
// level is reached, then converts the final node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level selected by CreateAllocationRequest, passed through customData.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node at currLevel whose offset matches the request.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Pushing rightChild first and leftChild second leaves leftChild at
        // the front, so the next iteration descends into the left half.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9606 
    9607 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9608 {
    9609  if(node->type == Node::TYPE_SPLIT)
    9610  {
    9611  DeleteNode(node->split.leftChild->buddy);
    9612  DeleteNode(node->split.leftChild);
    9613  }
    9614 
    9615  vma_delete(GetAllocationCallbacks(), node);
    9616 }
    9617 
// Recursively validates one node of the buddy tree: parent link, mutual
// buddy links, and type-specific invariants. Accumulates free/allocation
// counters into ctx so the caller can cross-check aggregate members.
// Returns false (via VMA_VALIDATE) on the first violated invariant.
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Only the root has no buddy; buddy links must point at each other.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // The tail of the node beyond the allocation counts as free space.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        // Unknown node type - corrupted metadata.
        return false;
    }

    return true;
}
    9661 
    9662 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9663 {
    9664  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9665  uint32_t level = 0;
    9666  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9667  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9668  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9669  {
    9670  ++level;
    9671  currLevelNodeSize = nextLevelNodeSize;
    9672  nextLevelNodeSize = currLevelNodeSize >> 1;
    9673  }
    9674  return level;
    9675 }
    9676 
// Frees the allocation at `offset`: descends from the root to the allocated
// leaf node containing that offset, marks it free, then repeatedly merges it
// with its buddy while the buddy is also free, climbing up the tree.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        // Descend into whichever half-range contains `offset`.
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above tolerates alloc == VK_NULL_HANDLE, yet
    // GetSize() below dereferences it - confirm callers never pass null here.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        // Both halves of the parent are free: delete the children and treat
        // the parent as one free node of the level above.
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9727 
    9728 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9729 {
    9730  switch(node->type)
    9731  {
    9732  case Node::TYPE_FREE:
    9733  ++outInfo.unusedRangeCount;
    9734  outInfo.unusedBytes += levelNodeSize;
    9735  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9736  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9737  break;
    9738  case Node::TYPE_ALLOCATION:
    9739  {
    9740  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9741  ++outInfo.allocationCount;
    9742  outInfo.usedBytes += allocSize;
    9743  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9744  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9745 
    9746  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9747  if(unusedRangeSize > 0)
    9748  {
    9749  ++outInfo.unusedRangeCount;
    9750  outInfo.unusedBytes += unusedRangeSize;
    9751  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9752  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9753  }
    9754  }
    9755  break;
    9756  case Node::TYPE_SPLIT:
    9757  {
    9758  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9759  const Node* const leftChild = node->split.leftChild;
    9760  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9761  const Node* const rightChild = leftChild->buddy;
    9762  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9763  }
    9764  break;
    9765  default:
    9766  VMA_ASSERT(0);
    9767  }
    9768 }
    9769 
    9770 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9771 {
    9772  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9773 
    9774  // List is empty.
    9775  Node* const frontNode = m_FreeList[level].front;
    9776  if(frontNode == VMA_NULL)
    9777  {
    9778  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9779  node->free.prev = node->free.next = VMA_NULL;
    9780  m_FreeList[level].front = m_FreeList[level].back = node;
    9781  }
    9782  else
    9783  {
    9784  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9785  node->free.prev = VMA_NULL;
    9786  node->free.next = frontNode;
    9787  frontNode->free.prev = node;
    9788  m_FreeList[level].front = node;
    9789  }
    9790 }
    9791 
    9792 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9793 {
    9794  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9795 
    9796  // It is at the front.
    9797  if(node->free.prev == VMA_NULL)
    9798  {
    9799  VMA_ASSERT(m_FreeList[level].front == node);
    9800  m_FreeList[level].front = node->free.next;
    9801  }
    9802  else
    9803  {
    9804  Node* const prevFreeNode = node->free.prev;
    9805  VMA_ASSERT(prevFreeNode->free.next == node);
    9806  prevFreeNode->free.next = node->free.next;
    9807  }
    9808 
    9809  // It is at the back.
    9810  if(node->free.next == VMA_NULL)
    9811  {
    9812  VMA_ASSERT(m_FreeList[level].back == node);
    9813  m_FreeList[level].back = node->free.prev;
    9814  }
    9815  else
    9816  {
    9817  Node* const nextFreeNode = node->free.next;
    9818  VMA_ASSERT(nextFreeNode->free.prev == node);
    9819  nextFreeNode->free.prev = node->free.prev;
    9820  }
    9821 }
    9822 
    9823 #if VMA_STATS_STRING_ENABLED
    9824 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    9825 {
    9826  switch(node->type)
    9827  {
    9828  case Node::TYPE_FREE:
    9829  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    9830  break;
    9831  case Node::TYPE_ALLOCATION:
    9832  {
    9833  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    9834  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9835  if(allocSize < levelNodeSize)
    9836  {
    9837  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    9838  }
    9839  }
    9840  break;
    9841  case Node::TYPE_SPLIT:
    9842  {
    9843  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9844  const Node* const leftChild = node->split.leftChild;
    9845  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    9846  const Node* const rightChild = leftChild->buddy;
    9847  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    9848  }
    9849  break;
    9850  default:
    9851  VMA_ASSERT(0);
    9852  }
    9853 }
    9854 #endif // #if VMA_STATS_STRING_ENABLED
    9855 
    9856 
    9858 // class VmaDeviceMemoryBlock
    9859 
// Creates an uninitialized block: all members at sentinel values.
// Real setup happens in Init(); hAllocator is unused here.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    9869 
    9870 void VmaDeviceMemoryBlock::Init(
    9871  VmaAllocator hAllocator,
    9872  uint32_t newMemoryTypeIndex,
    9873  VkDeviceMemory newMemory,
    9874  VkDeviceSize newSize,
    9875  uint32_t id,
    9876  uint32_t algorithm)
    9877 {
    9878  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9879 
    9880  m_MemoryTypeIndex = newMemoryTypeIndex;
    9881  m_Id = id;
    9882  m_hMemory = newMemory;
    9883 
    9884  switch(algorithm)
    9885  {
    9887  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9888  break;
    9890  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9891  break;
    9892  default:
    9893  VMA_ASSERT(0);
    9894  // Fall-through.
    9895  case 0:
    9896  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9897  }
    9898  m_pMetadata->Init(newSize);
    9899 }
    9900 
// Releases the underlying VkDeviceMemory and the metadata object.
// The block must be empty (all allocations freed) before this is called.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    9914 
// Sanity-checks the block handle and size, then defers to the metadata's
// own validator. Returns false (via VMA_VALIDATE) on failure.
bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}
    9922 
    9923 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9924 {
    9925  void* pData = nullptr;
    9926  VkResult res = Map(hAllocator, 1, &pData);
    9927  if(res != VK_SUCCESS)
    9928  {
    9929  return res;
    9930  }
    9931 
    9932  res = m_pMetadata->CheckCorruption(pData);
    9933 
    9934  Unmap(hAllocator, 1);
    9935 
    9936  return res;
    9937 }
    9938 
    9939 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    9940 {
    9941  if(count == 0)
    9942  {
    9943  return VK_SUCCESS;
    9944  }
    9945 
    9946  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9947  if(m_MapCount != 0)
    9948  {
    9949  m_MapCount += count;
    9950  VMA_ASSERT(m_pMappedData != VMA_NULL);
    9951  if(ppData != VMA_NULL)
    9952  {
    9953  *ppData = m_pMappedData;
    9954  }
    9955  return VK_SUCCESS;
    9956  }
    9957  else
    9958  {
    9959  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    9960  hAllocator->m_hDevice,
    9961  m_hMemory,
    9962  0, // offset
    9963  VK_WHOLE_SIZE,
    9964  0, // flags
    9965  &m_pMappedData);
    9966  if(result == VK_SUCCESS)
    9967  {
    9968  if(ppData != VMA_NULL)
    9969  {
    9970  *ppData = m_pMappedData;
    9971  }
    9972  m_MapCount = count;
    9973  }
    9974  return result;
    9975  }
    9976 }
    9977 
    9978 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    9979 {
    9980  if(count == 0)
    9981  {
    9982  return;
    9983  }
    9984 
    9985  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9986  if(m_MapCount >= count)
    9987  {
    9988  m_MapCount -= count;
    9989  if(m_MapCount == 0)
    9990  {
    9991  m_pMappedData = VMA_NULL;
    9992  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    9993  }
    9994  }
    9995  else
    9996  {
    9997  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    9998  }
    9999 }
    10000 
    10001 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10002 {
    10003  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10004  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10005 
    10006  void* pData;
    10007  VkResult res = Map(hAllocator, 1, &pData);
    10008  if(res != VK_SUCCESS)
    10009  {
    10010  return res;
    10011  }
    10012 
    10013  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10014  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10015 
    10016  Unmap(hAllocator, 1);
    10017 
    10018  return VK_SUCCESS;
    10019 }
    10020 
    10021 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10022 {
    10023  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10024  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10025 
    10026  void* pData;
    10027  VkResult res = Map(hAllocator, 1, &pData);
    10028  if(res != VK_SUCCESS)
    10029  {
    10030  return res;
    10031  }
    10032 
    10033  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10034  {
    10035  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10036  }
    10037  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10038  {
    10039  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10040  }
    10041 
    10042  Unmap(hAllocator, 1);
    10043 
    10044  return VK_SUCCESS;
    10045 }
    10046 
// Binds hBuffer to this block's VkDeviceMemory at the allocation's offset.
// hAllocation must be a block-type allocation that lives in this block.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}
    10062 
    10063 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10064  const VmaAllocator hAllocator,
    10065  const VmaAllocation hAllocation,
    10066  VkImage hImage)
    10067 {
    10068  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10069  hAllocation->GetBlock() == this);
    10070  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10071  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10072  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10073  hAllocator->m_hDevice,
    10074  hImage,
    10075  m_hMemory,
    10076  hAllocation->GetOffset());
    10077 }
    10078 
    10079 static void InitStatInfo(VmaStatInfo& outInfo)
    10080 {
    10081  memset(&outInfo, 0, sizeof(outInfo));
    10082  outInfo.allocationSizeMin = UINT64_MAX;
    10083  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10084 }
    10085 
    10086 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10087 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10088 {
    10089  inoutInfo.blockCount += srcInfo.blockCount;
    10090  inoutInfo.allocationCount += srcInfo.allocationCount;
    10091  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10092  inoutInfo.usedBytes += srcInfo.usedBytes;
    10093  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10094  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10095  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10096  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10097  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10098 }
    10099 
    10100 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10101 {
    10102  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10103  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10104  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10105  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10106 }
    10107 
// Creates a custom pool: a thin wrapper around a dedicated VmaBlockVector.
// createInfo.blockSize == 0 means "use the allocator's preferred block size"
// and marks the block size as non-explicit, which enables the heuristic that
// lets VmaBlockVector create smaller initial blocks (1/8, 1/4, 1/2).
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // A pool may explicitly opt out of respecting bufferImageGranularity.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0) // assigned later by the allocator when the pool is registered
{
}
    10126 
// Trivial destructor: m_BlockVector's own destructor releases all blocks.
VmaPool_T::~VmaPool_T()
{
}
    10130 
    10131 #if VMA_STATS_STRING_ENABLED
    10132 
    10133 #endif // #if VMA_STATS_STRING_ENABLED
    10134 
// Constructs a vector of VkDeviceMemory blocks for a single Vulkan memory
// type. Blocks are created lazily on allocation (see Allocate/CreateBlock);
// call CreateMinBlocks() afterwards to pre-create minBlockCount blocks.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    // Block pointers are stored in a VMA-allocated vector so all heap traffic
    // goes through the user-supplied allocation callbacks.
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10162 
    10163 VmaBlockVector::~VmaBlockVector()
    10164 {
    10165  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10166 
    10167  for(size_t i = m_Blocks.size(); i--; )
    10168  {
    10169  m_Blocks[i]->Destroy(m_hAllocator);
    10170  vma_delete(m_hAllocator, m_Blocks[i]);
    10171  }
    10172 }
    10173 
    10174 VkResult VmaBlockVector::CreateMinBlocks()
    10175 {
    10176  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10177  {
    10178  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10179  if(res != VK_SUCCESS)
    10180  {
    10181  return res;
    10182  }
    10183  }
    10184  return VK_SUCCESS;
    10185 }
    10186 
    10187 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10188 {
    10189  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10190 
    10191  const size_t blockCount = m_Blocks.size();
    10192 
    10193  pStats->size = 0;
    10194  pStats->unusedSize = 0;
    10195  pStats->allocationCount = 0;
    10196  pStats->unusedRangeCount = 0;
    10197  pStats->unusedRangeSizeMax = 0;
    10198  pStats->blockCount = blockCount;
    10199 
    10200  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10201  {
    10202  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10203  VMA_ASSERT(pBlock);
    10204  VMA_HEAVY_ASSERT(pBlock->Validate());
    10205  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10206  }
    10207 }
    10208 
    10209 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10210 {
    10211  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10212  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10213  (VMA_DEBUG_MARGIN > 0) &&
    10214  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10215 }
    10216 
// Maximum number of retries when allocating with "make other allocations
// lost" before giving up with VK_ERROR_TOO_MANY_OBJECTS (see VmaBlockVector::Allocate).
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10218 
    10219 VkResult VmaBlockVector::Allocate(
    10220  VmaPool hCurrentPool,
    10221  uint32_t currentFrameIndex,
    10222  VkDeviceSize size,
    10223  VkDeviceSize alignment,
    10224  const VmaAllocationCreateInfo& createInfo,
    10225  VmaSuballocationType suballocType,
    10226  VmaAllocation* pAllocation)
    10227 {
    10228  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10229  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10230  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10231  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10232  const bool canCreateNewBlock =
    10233  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10234  (m_Blocks.size() < m_MaxBlockCount);
    10235  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10236 
    10237  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10238  // Which in turn is available only when maxBlockCount = 1.
    10239  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10240  {
    10241  canMakeOtherLost = false;
    10242  }
    10243 
    10244  // Upper address can only be used with linear allocator and within single memory block.
    10245  if(isUpperAddress &&
    10246  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10247  {
    10248  return VK_ERROR_FEATURE_NOT_PRESENT;
    10249  }
    10250 
    10251  // Validate strategy.
    10252  switch(strategy)
    10253  {
    10254  case 0:
    10256  break;
    10260  break;
    10261  default:
    10262  return VK_ERROR_FEATURE_NOT_PRESENT;
    10263  }
    10264 
    10265  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10266  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10267  {
    10268  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10269  }
    10270 
    10271  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10272 
    10273  /*
    10274  Under certain condition, this whole section can be skipped for optimization, so
    10275  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10276  e.g. for custom pools with linear algorithm.
    10277  */
    10278  if(!canMakeOtherLost || canCreateNewBlock)
    10279  {
    10280  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10281  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10283 
    10284  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10285  {
    10286  // Use only last block.
    10287  if(!m_Blocks.empty())
    10288  {
    10289  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10290  VMA_ASSERT(pCurrBlock);
    10291  VkResult res = AllocateFromBlock(
    10292  pCurrBlock,
    10293  hCurrentPool,
    10294  currentFrameIndex,
    10295  size,
    10296  alignment,
    10297  allocFlagsCopy,
    10298  createInfo.pUserData,
    10299  suballocType,
    10300  strategy,
    10301  pAllocation);
    10302  if(res == VK_SUCCESS)
    10303  {
    10304  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10305  return VK_SUCCESS;
    10306  }
    10307  }
    10308  }
    10309  else
    10310  {
    10312  {
    10313  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10314  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10315  {
    10316  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10317  VMA_ASSERT(pCurrBlock);
    10318  VkResult res = AllocateFromBlock(
    10319  pCurrBlock,
    10320  hCurrentPool,
    10321  currentFrameIndex,
    10322  size,
    10323  alignment,
    10324  allocFlagsCopy,
    10325  createInfo.pUserData,
    10326  suballocType,
    10327  strategy,
    10328  pAllocation);
    10329  if(res == VK_SUCCESS)
    10330  {
    10331  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10332  return VK_SUCCESS;
    10333  }
    10334  }
    10335  }
    10336  else // WORST_FIT, FIRST_FIT
    10337  {
    10338  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10339  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10340  {
    10341  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10342  VMA_ASSERT(pCurrBlock);
    10343  VkResult res = AllocateFromBlock(
    10344  pCurrBlock,
    10345  hCurrentPool,
    10346  currentFrameIndex,
    10347  size,
    10348  alignment,
    10349  allocFlagsCopy,
    10350  createInfo.pUserData,
    10351  suballocType,
    10352  strategy,
    10353  pAllocation);
    10354  if(res == VK_SUCCESS)
    10355  {
    10356  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10357  return VK_SUCCESS;
    10358  }
    10359  }
    10360  }
    10361  }
    10362 
    10363  // 2. Try to create new block.
    10364  if(canCreateNewBlock)
    10365  {
    10366  // Calculate optimal size for new block.
    10367  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10368  uint32_t newBlockSizeShift = 0;
    10369  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10370 
    10371  if(!m_ExplicitBlockSize)
    10372  {
    10373  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10374  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10375  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10376  {
    10377  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10378  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10379  {
    10380  newBlockSize = smallerNewBlockSize;
    10381  ++newBlockSizeShift;
    10382  }
    10383  else
    10384  {
    10385  break;
    10386  }
    10387  }
    10388  }
    10389 
    10390  size_t newBlockIndex = 0;
    10391  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10392  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10393  if(!m_ExplicitBlockSize)
    10394  {
    10395  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10396  {
    10397  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10398  if(smallerNewBlockSize >= size)
    10399  {
    10400  newBlockSize = smallerNewBlockSize;
    10401  ++newBlockSizeShift;
    10402  res = CreateBlock(newBlockSize, &newBlockIndex);
    10403  }
    10404  else
    10405  {
    10406  break;
    10407  }
    10408  }
    10409  }
    10410 
    10411  if(res == VK_SUCCESS)
    10412  {
    10413  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10414  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10415 
    10416  res = AllocateFromBlock(
    10417  pBlock,
    10418  hCurrentPool,
    10419  currentFrameIndex,
    10420  size,
    10421  alignment,
    10422  allocFlagsCopy,
    10423  createInfo.pUserData,
    10424  suballocType,
    10425  strategy,
    10426  pAllocation);
    10427  if(res == VK_SUCCESS)
    10428  {
    10429  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10430  return VK_SUCCESS;
    10431  }
    10432  else
    10433  {
    10434  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10435  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10436  }
    10437  }
    10438  }
    10439  }
    10440 
    10441  // 3. Try to allocate from existing blocks with making other allocations lost.
    10442  if(canMakeOtherLost)
    10443  {
    10444  uint32_t tryIndex = 0;
    10445  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10446  {
    10447  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10448  VmaAllocationRequest bestRequest = {};
    10449  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10450 
    10451  // 1. Search existing allocations.
    10453  {
    10454  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10455  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10456  {
    10457  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10458  VMA_ASSERT(pCurrBlock);
    10459  VmaAllocationRequest currRequest = {};
    10460  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10461  currentFrameIndex,
    10462  m_FrameInUseCount,
    10463  m_BufferImageGranularity,
    10464  size,
    10465  alignment,
    10466  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10467  suballocType,
    10468  canMakeOtherLost,
    10469  strategy,
    10470  &currRequest))
    10471  {
    10472  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10473  if(pBestRequestBlock == VMA_NULL ||
    10474  currRequestCost < bestRequestCost)
    10475  {
    10476  pBestRequestBlock = pCurrBlock;
    10477  bestRequest = currRequest;
    10478  bestRequestCost = currRequestCost;
    10479 
    10480  if(bestRequestCost == 0)
    10481  {
    10482  break;
    10483  }
    10484  }
    10485  }
    10486  }
    10487  }
    10488  else // WORST_FIT, FIRST_FIT
    10489  {
    10490  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10491  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10492  {
    10493  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10494  VMA_ASSERT(pCurrBlock);
    10495  VmaAllocationRequest currRequest = {};
    10496  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10497  currentFrameIndex,
    10498  m_FrameInUseCount,
    10499  m_BufferImageGranularity,
    10500  size,
    10501  alignment,
    10502  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10503  suballocType,
    10504  canMakeOtherLost,
    10505  strategy,
    10506  &currRequest))
    10507  {
    10508  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10509  if(pBestRequestBlock == VMA_NULL ||
    10510  currRequestCost < bestRequestCost ||
    10512  {
    10513  pBestRequestBlock = pCurrBlock;
    10514  bestRequest = currRequest;
    10515  bestRequestCost = currRequestCost;
    10516 
    10517  if(bestRequestCost == 0 ||
    10519  {
    10520  break;
    10521  }
    10522  }
    10523  }
    10524  }
    10525  }
    10526 
    10527  if(pBestRequestBlock != VMA_NULL)
    10528  {
    10529  if(mapped)
    10530  {
    10531  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10532  if(res != VK_SUCCESS)
    10533  {
    10534  return res;
    10535  }
    10536  }
    10537 
    10538  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10539  currentFrameIndex,
    10540  m_FrameInUseCount,
    10541  &bestRequest))
    10542  {
    10543  // We no longer have an empty Allocation.
    10544  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10545  {
    10546  m_HasEmptyBlock = false;
    10547  }
    10548  // Allocate from this pBlock.
    10549  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10550  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10551  (*pAllocation)->InitBlockAllocation(
    10552  hCurrentPool,
    10553  pBestRequestBlock,
    10554  bestRequest.offset,
    10555  alignment,
    10556  size,
    10557  suballocType,
    10558  mapped,
    10559  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10560  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10561  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10562  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10563  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10564  {
    10565  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10566  }
    10567  if(IsCorruptionDetectionEnabled())
    10568  {
    10569  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10570  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10571  }
    10572  return VK_SUCCESS;
    10573  }
    10574  // else: Some allocations must have been touched while we are here. Next try.
    10575  }
    10576  else
    10577  {
    10578  // Could not find place in any of the blocks - break outer loop.
    10579  break;
    10580  }
    10581  }
    10582  /* Maximum number of tries exceeded - a very unlike event when many other
    10583  threads are simultaneously touching allocations making it impossible to make
    10584  lost at the same time as we try to allocate. */
    10585  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10586  {
    10587  return VK_ERROR_TOO_MANY_OBJECTS;
    10588  }
    10589  }
    10590 
    10591  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10592 }
    10593 
    10594 void VmaBlockVector::Free(
    10595  VmaAllocation hAllocation)
    10596 {
    10597  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10598 
    10599  // Scope for lock.
    10600  {
    10601  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10602 
    10603  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10604 
    10605  if(IsCorruptionDetectionEnabled())
    10606  {
    10607  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10608  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10609  }
    10610 
    10611  if(hAllocation->IsPersistentMap())
    10612  {
    10613  pBlock->Unmap(m_hAllocator, 1);
    10614  }
    10615 
    10616  pBlock->m_pMetadata->Free(hAllocation);
    10617  VMA_HEAVY_ASSERT(pBlock->Validate());
    10618 
    10619  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10620 
    10621  // pBlock became empty after this deallocation.
    10622  if(pBlock->m_pMetadata->IsEmpty())
    10623  {
    10624  // Already has empty Allocation. We don't want to have two, so delete this one.
    10625  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10626  {
    10627  pBlockToDelete = pBlock;
    10628  Remove(pBlock);
    10629  }
    10630  // We now have first empty block.
    10631  else
    10632  {
    10633  m_HasEmptyBlock = true;
    10634  }
    10635  }
    10636  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10637  // (This is optional, heuristics.)
    10638  else if(m_HasEmptyBlock)
    10639  {
    10640  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10641  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10642  {
    10643  pBlockToDelete = pLastBlock;
    10644  m_Blocks.pop_back();
    10645  m_HasEmptyBlock = false;
    10646  }
    10647  }
    10648 
    10649  IncrementallySortBlocks();
    10650  }
    10651 
    10652  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10653  // lock, for performance reason.
    10654  if(pBlockToDelete != VMA_NULL)
    10655  {
    10656  VMA_DEBUG_LOG(" Deleted empty allocation");
    10657  pBlockToDelete->Destroy(m_hAllocator);
    10658  vma_delete(m_hAllocator, pBlockToDelete);
    10659  }
    10660 }
    10661 
    10662 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10663 {
    10664  VkDeviceSize result = 0;
    10665  for(size_t i = m_Blocks.size(); i--; )
    10666  {
    10667  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10668  if(result >= m_PreferredBlockSize)
    10669  {
    10670  break;
    10671  }
    10672  }
    10673  return result;
    10674 }
    10675 
    10676 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10677 {
    10678  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10679  {
    10680  if(m_Blocks[blockIndex] == pBlock)
    10681  {
    10682  VmaVectorRemove(m_Blocks, blockIndex);
    10683  return;
    10684  }
    10685  }
    10686  VMA_ASSERT(0);
    10687 }
    10688 
    10689 void VmaBlockVector::IncrementallySortBlocks()
    10690 {
    10691  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10692  {
    10693  // Bubble sort only until first swap.
    10694  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10695  {
    10696  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10697  {
    10698  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10699  return;
    10700  }
    10701  }
    10702  }
    10703 }
    10704 
// Tries to carve an allocation of `size`/`alignment` out of a single given
// block, never making other allocations lost. On success creates and
// initializes *pAllocation and returns VK_SUCCESS; if the block has no
// suitable free range, returns VK_ERROR_OUT_OF_DEVICE_MEMORY so the caller
// can try the next block.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Caller must have stripped this flag (see Allocate step 1).
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        if(mapped)
        {
            // Increase the block's persistent-map reference count before committing.
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Commit: create the allocation object and register it in the metadata.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Fill new memory with a recognizable pattern to help catch use of uninitialized data.
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            // Stamp magic values into the margins around the new allocation.
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10779 
    10780 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10781 {
    10782  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10783  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10784  allocInfo.allocationSize = blockSize;
    10785  VkDeviceMemory mem = VK_NULL_HANDLE;
    10786  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10787  if(res < 0)
    10788  {
    10789  return res;
    10790  }
    10791 
    10792  // New VkDeviceMemory successfully created.
    10793 
    10794  // Create new Allocation for it.
    10795  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10796  pBlock->Init(
    10797  m_hAllocator,
    10798  m_MemoryTypeIndex,
    10799  mem,
    10800  allocInfo.allocationSize,
    10801  m_NextBlockId++,
    10802  m_Algorithm);
    10803 
    10804  m_Blocks.push_back(pBlock);
    10805  if(pNewBlockIndex != VMA_NULL)
    10806  {
    10807  *pNewBlockIndex = m_Blocks.size() - 1;
    10808  }
    10809 
    10810  return VK_SUCCESS;
    10811 }
    10812 
    10813 #if VMA_STATS_STRING_ENABLED
    10814 
// Writes this block vector as a JSON object: configuration first (different
// fields for custom pools vs. default vectors), then a "Blocks" object keyed
// by block id with each block's detailed suballocation map.
// NOTE: the emission order defines the JSON schema consumed by external
// tooling - do not reorder the Write* calls.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are emitted only when they constrain anything.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // 0 means the default algorithm; only report an explicit choice.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        // Key each block by its numeric id.
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10877 
    10878 #endif // #if VMA_STATS_STRING_ENABLED
    10879 
    10880 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10881  VmaAllocator hAllocator,
    10882  uint32_t currentFrameIndex)
    10883 {
    10884  if(m_pDefragmentator == VMA_NULL)
    10885  {
    10886  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10887  hAllocator,
    10888  this,
    10889  currentFrameIndex);
    10890  }
    10891 
    10892  return m_pDefragmentator;
    10893 }
    10894 
// Runs one defragmentation pass over this block vector, constrained by the
// remaining budgets maxBytesToMove / maxAllocationsToMove (both decremented
// in place by what this pass consumed). Accumulates results into
// *pDefragmentationStats when provided, then frees blocks that became empty
// (keeping m_MinBlockCount). No-op returning VK_SUCCESS when no
// defragmentator was created for this vector.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must respect the budgets it was given.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    // Iterate backwards so VmaVectorRemove doesn't invalidate unvisited indices.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block to honor m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    10951 
    10952 void VmaBlockVector::DestroyDefragmentator()
    10953 {
    10954  if(m_pDefragmentator != VMA_NULL)
    10955  {
    10956  vma_delete(m_hAllocator, m_pDefragmentator);
    10957  m_pDefragmentator = VMA_NULL;
    10958  }
    10959 }
    10960 
    10961 void VmaBlockVector::MakePoolAllocationsLost(
    10962  uint32_t currentFrameIndex,
    10963  size_t* pLostAllocationCount)
    10964 {
    10965  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10966  size_t lostAllocationCount = 0;
    10967  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10968  {
    10969  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10970  VMA_ASSERT(pBlock);
    10971  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10972  }
    10973  if(pLostAllocationCount != VMA_NULL)
    10974  {
    10975  *pLostAllocationCount = lostAllocationCount;
    10976  }
    10977 }
    10978 
    10979 VkResult VmaBlockVector::CheckCorruption()
    10980 {
    10981  if(!IsCorruptionDetectionEnabled())
    10982  {
    10983  return VK_ERROR_FEATURE_NOT_PRESENT;
    10984  }
    10985 
    10986  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10987  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10988  {
    10989  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10990  VMA_ASSERT(pBlock);
    10991  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    10992  if(res != VK_SUCCESS)
    10993  {
    10994  return res;
    10995  }
    10996  }
    10997  return VK_SUCCESS;
    10998 }
    10999 
    11000 void VmaBlockVector::AddStats(VmaStats* pStats)
    11001 {
    11002  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11003  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11004 
    11005  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11006 
    11007  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11008  {
    11009  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11010  VMA_ASSERT(pBlock);
    11011  VMA_HEAVY_ASSERT(pBlock->Validate());
    11012  VmaStatInfo allocationStatInfo;
    11013  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11014  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11015  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11016  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11017  }
    11018 }
    11019 
    11021 // VmaDefragmentator members definition
    11022 
// Constructs a defragmentator bound to one block vector.
// hAllocator       - owning allocator, used for allocation callbacks and mapping.
// pBlockVector     - the block vector whose allocations will be compacted.
// currentFrameIndex - frame index used for lost-allocation checks.
// Only the default (generic) metadata algorithm supports defragmentation,
// hence the assert below.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11037 
    11038 VmaDefragmentator::~VmaDefragmentator()
    11039 {
    11040  for(size_t i = m_Blocks.size(); i--; )
    11041  {
    11042  vma_delete(m_hAllocator, m_Blocks[i]);
    11043  }
    11044 }
    11045 
    11046 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11047 {
    11048  AllocationInfo allocInfo;
    11049  allocInfo.m_hAllocation = hAlloc;
    11050  allocInfo.m_pChanged = pChanged;
    11051  m_Allocations.push_back(allocInfo);
    11052 }
    11053 
    11054 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11055 {
    11056  // It has already been mapped for defragmentation.
    11057  if(m_pMappedDataForDefragmentation)
    11058  {
    11059  *ppMappedData = m_pMappedDataForDefragmentation;
    11060  return VK_SUCCESS;
    11061  }
    11062 
    11063  // It is originally mapped.
    11064  if(m_pBlock->GetMappedData())
    11065  {
    11066  *ppMappedData = m_pBlock->GetMappedData();
    11067  return VK_SUCCESS;
    11068  }
    11069 
    11070  // Map on first usage.
    11071  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11072  *ppMappedData = m_pMappedDataForDefragmentation;
    11073  return res;
    11074 }
    11075 
    11076 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11077 {
    11078  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11079  {
    11080  m_pBlock->Unmap(hAllocator, 1);
    11081  }
    11082 }
    11083 
// Performs one pass of defragmentation: walks candidate allocations from the
// most "source" block backwards and tries to relocate each one into an
// earlier (more "destination") block, or earlier within its own block.
// Data is moved with a CPU memcpy between mapped block memories.
// Returns VK_SUCCESS when the pass completed (or there was nothing to do),
// VK_INCOMPLETE when maxBytesToMove/maxAllocationsToMove was reached, or a
// mapping error from EnsureMapping().
// Precondition: m_Blocks is sorted from most "destination" to most "source",
// and each block's m_Allocations is sorted from largest to smallest.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // srcAllocIndex == SIZE_MAX is a sentinel meaning "not yet positioned in
    // the current block"; the loop below then snaps it to the last element.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                // canMakeOtherLost was false, so the request must not require
                // making any other allocation lost.
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Map both blocks (or reuse existing mappings) for the CPU copy.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                if(VMA_DEBUG_MARGIN > 0)
                {
                    // Re-establish the corruption-detection magic values around
                    // the allocation at its new location.
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Update metadata: register at destination, free at source.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Notify the caller (via pointer passed to AddAllocation).
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous allocation / block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11227 
// Top-level defragmentation driver for one block vector.
// Steps: build per-block info, distribute registered allocations to their
// blocks, sort blocks from most "destination" to most "source", run up to two
// DefragmentRound passes within the given byte/allocation budgets, then unmap
// any blocks that were mapped only for defragmentation.
// Caller is expected to hold the block vector's mutex.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value, to enable binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of this
                // vector's blocks - anything else is a logic error.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11295 
    11296 bool VmaDefragmentator::MoveMakesSense(
    11297  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11298  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11299 {
    11300  if(dstBlockIndex < srcBlockIndex)
    11301  {
    11302  return true;
    11303  }
    11304  if(dstBlockIndex > srcBlockIndex)
    11305  {
    11306  return false;
    11307  }
    11308  if(dstOffset < srcOffset)
    11309  {
    11310  return true;
    11311  }
    11312  return false;
    11313 }
    11314 
    11316 // VmaRecorder
    11317 
    11318 #if VMA_RECORDING_ENABLED
    11319 
// Constructs an inactive recorder. Real initialization (opening the file,
// capturing the performance-counter baseline) happens in Init().
// m_Freq/m_StartCounter start at INT64_MAX as "not initialized" markers.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11328 
// Initializes the recorder: captures the QueryPerformanceCounter baseline used
// to timestamp entries and opens the output file in binary mode.
// Returns VK_ERROR_INITIALIZATION_FAILED when the file cannot be opened.
// NOTE(review): uses Win32 QPC and fopen_s - this code path appears to be
// Windows-only; confirm VMA_RECORDING_ENABLED is gated accordingly.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Baseline for relative timestamps written by GetBasicParams().
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file signature followed by format version (major,minor).
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,3");

    return VK_SUCCESS;
}
    11350 
    11351 VmaRecorder::~VmaRecorder()
    11352 {
    11353  if(m_File != VMA_NULL)
    11354  {
    11355  fclose(m_File);
    11356  }
    11357 }
    11358 
    11359 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11360 {
    11361  CallParams callParams;
    11362  GetBasicParams(callParams);
    11363 
    11364  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11365  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11366  Flush();
    11367 }
    11368 
    11369 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11370 {
    11371  CallParams callParams;
    11372  GetBasicParams(callParams);
    11373 
    11374  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11375  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11376  Flush();
    11377 }
    11378 
// Writes a "vmaCreatePool" entry: pool creation parameters followed by the
// resulting pool handle, used later to correlate per-pool calls on replay.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // min/maxBlockCount are size_t - cast to a fixed 64-bit type for %llu.
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11395 
    11396 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11397 {
    11398  CallParams callParams;
    11399  GetBasicParams(callParams);
    11400 
    11401  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11402  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11403  pool);
    11404  Flush();
    11405 }
    11406 
// Writes a "vmaAllocateMemory" entry: memory requirements, allocation create
// info, the resulting allocation handle, and the user-data string.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // pUserData is rendered as text or pointer depending on createInfo.flags.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11431 
// Writes a "vmaAllocateMemoryForBuffer" entry: memory requirements, the
// dedicated-allocation hints as 0/1 flags, allocation create info, handle,
// and user-data string.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11460 
// Writes a "vmaAllocateMemoryForImage" entry - same layout as the
// ...ForBuffer variant: requirements, dedicated-allocation hints (0/1),
// create info, allocation handle, user-data string.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11489 
    11490 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11491  VmaAllocation allocation)
    11492 {
    11493  CallParams callParams;
    11494  GetBasicParams(callParams);
    11495 
    11496  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11497  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11498  allocation);
    11499  Flush();
    11500 }
    11501 
    11502 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11503  VmaAllocation allocation,
    11504  const void* pUserData)
    11505 {
    11506  CallParams callParams;
    11507  GetBasicParams(callParams);
    11508 
    11509  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11510  UserDataString userDataStr(
    11511  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11512  pUserData);
    11513  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11514  allocation,
    11515  userDataStr.GetString());
    11516  Flush();
    11517 }
    11518 
    11519 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11520  VmaAllocation allocation)
    11521 {
    11522  CallParams callParams;
    11523  GetBasicParams(callParams);
    11524 
    11525  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11526  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11527  allocation);
    11528  Flush();
    11529 }
    11530 
    11531 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11532  VmaAllocation allocation)
    11533 {
    11534  CallParams callParams;
    11535  GetBasicParams(callParams);
    11536 
    11537  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11538  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11539  allocation);
    11540  Flush();
    11541 }
    11542 
    11543 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11544  VmaAllocation allocation)
    11545 {
    11546  CallParams callParams;
    11547  GetBasicParams(callParams);
    11548 
    11549  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11550  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11551  allocation);
    11552  Flush();
    11553 }
    11554 
    11555 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11556  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11557 {
    11558  CallParams callParams;
    11559  GetBasicParams(callParams);
    11560 
    11561  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11562  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11563  allocation,
    11564  offset,
    11565  size);
    11566  Flush();
    11567 }
    11568 
    11569 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11570  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11571 {
    11572  CallParams callParams;
    11573  GetBasicParams(callParams);
    11574 
    11575  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11576  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11577  allocation,
    11578  offset,
    11579  size);
    11580  Flush();
    11581 }
    11582 
// Writes a "vmaCreateBuffer" entry: buffer create info, allocation create
// info, the resulting allocation handle, and the user-data string.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // pUserData is rendered as text or pointer depending on allocCreateInfo.flags.
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11608 
// Writes a "vmaCreateImage" entry: the full image create info (flags, type,
// format, extent, mips, layers, samples, tiling, usage, sharing, layout),
// the allocation create info, the allocation handle, and user-data string.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11643 
    11644 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11645  VmaAllocation allocation)
    11646 {
    11647  CallParams callParams;
    11648  GetBasicParams(callParams);
    11649 
    11650  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11651  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11652  allocation);
    11653  Flush();
    11654 }
    11655 
    11656 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11657  VmaAllocation allocation)
    11658 {
    11659  CallParams callParams;
    11660  GetBasicParams(callParams);
    11661 
    11662  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11663  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11664  allocation);
    11665  Flush();
    11666 }
    11667 
    11668 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11669  VmaAllocation allocation)
    11670 {
    11671  CallParams callParams;
    11672  GetBasicParams(callParams);
    11673 
    11674  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11675  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11676  allocation);
    11677  Flush();
    11678 }
    11679 
    11680 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11681  VmaAllocation allocation)
    11682 {
    11683  CallParams callParams;
    11684  GetBasicParams(callParams);
    11685 
    11686  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11687  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11688  allocation);
    11689  Flush();
    11690 }
    11691 
    11692 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11693  VmaPool pool)
    11694 {
    11695  CallParams callParams;
    11696  GetBasicParams(callParams);
    11697 
    11698  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11699  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11700  pool);
    11701  Flush();
    11702 }
    11703 
    11704 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11705 {
    11706  if(pUserData != VMA_NULL)
    11707  {
    11708  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11709  {
    11710  m_Str = (const char*)pUserData;
    11711  }
    11712  else
    11713  {
    11714  sprintf_s(m_PtrStr, "%p", pUserData);
    11715  m_Str = m_PtrStr;
    11716  }
    11717  }
    11718  else
    11719  {
    11720  m_Str = "";
    11721  }
    11722 }
    11723 
// Writes the "Config,Begin" ... "Config,End" section of the recording file:
// physical device identity and relevant limits, the full memory heap/type
// layout, enabled extensions, and the VMA_* compile-time macro values, so a
// replay tool can reproduce (or detect mismatches in) the environment.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that affect allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps and types.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time VMA configuration macros, as 0/1 or numeric values.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11769 
    11770 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11771 {
    11772  outParams.threadId = GetCurrentThreadId();
    11773 
    11774  LARGE_INTEGER counter;
    11775  QueryPerformanceCounter(&counter);
    11776  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11777 }
    11778 
    11779 void VmaRecorder::Flush()
    11780 {
    11781  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11782  {
    11783  fflush(m_File);
    11784  }
    11785 }
    11786 
    11787 #endif // #if VMA_RECORDING_ENABLED
    11788 
    11790 // VmaAllocator_T
    11791 
    11792 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    11793  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    11794  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    11795  m_hDevice(pCreateInfo->device),
    11796  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    11797  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    11798  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    11799  m_PreferredLargeHeapBlockSize(0),
    11800  m_PhysicalDevice(pCreateInfo->physicalDevice),
    11801  m_CurrentFrameIndex(0),
    11802  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    11803  m_NextPoolId(0)
    11805  ,m_pRecorder(VMA_NULL)
    11806 #endif
    11807 {
    11808  if(VMA_DEBUG_DETECT_CORRUPTION)
    11809  {
    11810  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    11811  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    11812  }
    11813 
    11814  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    11815 
    11816 #if !(VMA_DEDICATED_ALLOCATION)
    11818  {
    11819  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    11820  }
    11821 #endif
    11822 
    11823  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    11824  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    11825  memset(&m_MemProps, 0, sizeof(m_MemProps));
    11826 
    11827  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    11828  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    11829 
    11830  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    11831  {
    11832  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    11833  }
    11834 
    11835  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    11836  {
    11837  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    11838  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    11839  }
    11840 
    11841  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    11842 
    11843  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    11844  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    11845 
    11846  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    11847  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    11848  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    11849  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    11850 
    11851  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    11852  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11853 
    11854  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    11855  {
    11856  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    11857  {
    11858  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    11859  if(limit != VK_WHOLE_SIZE)
    11860  {
    11861  m_HeapSizeLimit[heapIndex] = limit;
    11862  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    11863  {
    11864  m_MemProps.memoryHeaps[heapIndex].size = limit;
    11865  }
    11866  }
    11867  }
    11868  }
    11869 
    11870  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    11871  {
    11872  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    11873 
    11874  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    11875  this,
    11876  memTypeIndex,
    11877  preferredBlockSize,
    11878  0,
    11879  SIZE_MAX,
    11880  GetBufferImageGranularity(),
    11881  pCreateInfo->frameInUseCount,
    11882  false, // isCustomPool
    11883  false, // explicitBlockSize
    11884  false); // linearAlgorithm
    11885  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    11886  // becase minBlockCount is 0.
    11887  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    11888 
    11889  }
    11890 }
    11891 
    11892 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    11893 {
    11894  VkResult res = VK_SUCCESS;
    11895 
    11896  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    11897  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    11898  {
    11899 #if VMA_RECORDING_ENABLED
    11900  m_pRecorder = vma_new(this, VmaRecorder)();
    11901  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    11902  if(res != VK_SUCCESS)
    11903  {
    11904  return res;
    11905  }
    11906  m_pRecorder->WriteConfiguration(
    11907  m_PhysicalDeviceProperties,
    11908  m_MemProps,
    11909  m_UseKhrDedicatedAllocation);
    11910  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    11911 #else
    11912  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    11913  return VK_ERROR_FEATURE_NOT_PRESENT;
    11914 #endif
    11915  }
    11916 
    11917  return res;
    11918 }
    11919 
    11920 VmaAllocator_T::~VmaAllocator_T()
    11921 {
    11922 #if VMA_RECORDING_ENABLED
    11923  if(m_pRecorder != VMA_NULL)
    11924  {
    11925  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    11926  vma_delete(this, m_pRecorder);
    11927  }
    11928 #endif
    11929 
    11930  VMA_ASSERT(m_Pools.empty());
    11931 
    11932  for(size_t i = GetMemoryTypeCount(); i--; )
    11933  {
    11934  vma_delete(this, m_pDedicatedAllocations[i]);
    11935  vma_delete(this, m_pBlockVectors[i]);
    11936  }
    11937 }
    11938 
// Populates m_VulkanFunctions with pointers to every Vulkan entry point the
// allocator uses. Precedence:
//   1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, take the statically linked
//      functions; the KHR dedicated-allocation entry points are fetched via
//      vkGetDeviceProcAddr because extensions are not exported statically.
//   2. Any non-null pointer the user supplied in pVulkanFunctions overrides
//      the statically obtained one.
// Finally asserts that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions must be queried from the device at runtime.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a single user-provided function pointer unless it is null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Only required when the user opted in to the dedicated-allocation extension.
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12024 
    12025 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12026 {
    12027  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12028  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12029  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12030  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12031 }
    12032 
    12033 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12034  VkDeviceSize size,
    12035  VkDeviceSize alignment,
    12036  bool dedicatedAllocation,
    12037  VkBuffer dedicatedBuffer,
    12038  VkImage dedicatedImage,
    12039  const VmaAllocationCreateInfo& createInfo,
    12040  uint32_t memTypeIndex,
    12041  VmaSuballocationType suballocType,
    12042  VmaAllocation* pAllocation)
    12043 {
    12044  VMA_ASSERT(pAllocation != VMA_NULL);
    12045  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12046 
    12047  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12048 
    12049  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12050  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12051  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12052  {
    12053  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12054  }
    12055 
    12056  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12057  VMA_ASSERT(blockVector);
    12058 
    12059  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12060  bool preferDedicatedMemory =
    12061  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12062  dedicatedAllocation ||
    12063  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12064  size > preferredBlockSize / 2;
    12065 
    12066  if(preferDedicatedMemory &&
    12067  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12068  finalCreateInfo.pool == VK_NULL_HANDLE)
    12069  {
    12071  }
    12072 
    12073  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12074  {
    12075  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12076  {
    12077  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12078  }
    12079  else
    12080  {
    12081  return AllocateDedicatedMemory(
    12082  size,
    12083  suballocType,
    12084  memTypeIndex,
    12085  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12086  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12087  finalCreateInfo.pUserData,
    12088  dedicatedBuffer,
    12089  dedicatedImage,
    12090  pAllocation);
    12091  }
    12092  }
    12093  else
    12094  {
    12095  VkResult res = blockVector->Allocate(
    12096  VK_NULL_HANDLE, // hCurrentPool
    12097  m_CurrentFrameIndex.load(),
    12098  size,
    12099  alignment,
    12100  finalCreateInfo,
    12101  suballocType,
    12102  pAllocation);
    12103  if(res == VK_SUCCESS)
    12104  {
    12105  return res;
    12106  }
    12107 
    12108  // 5. Try dedicated memory.
    12109  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12110  {
    12111  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12112  }
    12113  else
    12114  {
    12115  res = AllocateDedicatedMemory(
    12116  size,
    12117  suballocType,
    12118  memTypeIndex,
    12119  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12120  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12121  finalCreateInfo.pUserData,
    12122  dedicatedBuffer,
    12123  dedicatedImage,
    12124  pAllocation);
    12125  if(res == VK_SUCCESS)
    12126  {
    12127  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12128  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12129  return VK_SUCCESS;
    12130  }
    12131  else
    12132  {
    12133  // Everything failed: Return error code.
    12134  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12135  return res;
    12136  }
    12137  }
    12138  }
    12139 }
    12140 
// Allocates a whole VkDeviceMemory block exclusively for one resource
// (no suballocation). Optionally chains VkMemoryDedicatedAllocateInfoKHR for
// the given buffer or image, optionally maps the memory persistently, and
// registers the new allocation in m_pDedicatedAllocations[memTypeIndex].
// Returns VK_SUCCESS or the failing Vulkan result; on map failure the freshly
// allocated memory is released before returning, so nothing leaks.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            // Buffer and image are mutually exclusive in a dedicated allocation.
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map when requested (VMA_ALLOCATION_CREATE_MAPPED_BIT).
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            // Release the just-allocated memory so the failure doesn't leak it.
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Fill new memory with a recognizable pattern to help catch reads of uninitialized data.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12223 
    12224 void VmaAllocator_T::GetBufferMemoryRequirements(
    12225  VkBuffer hBuffer,
    12226  VkMemoryRequirements& memReq,
    12227  bool& requiresDedicatedAllocation,
    12228  bool& prefersDedicatedAllocation) const
    12229 {
    12230 #if VMA_DEDICATED_ALLOCATION
    12231  if(m_UseKhrDedicatedAllocation)
    12232  {
    12233  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12234  memReqInfo.buffer = hBuffer;
    12235 
    12236  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12237 
    12238  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12239  memReq2.pNext = &memDedicatedReq;
    12240 
    12241  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12242 
    12243  memReq = memReq2.memoryRequirements;
    12244  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12245  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12246  }
    12247  else
    12248 #endif // #if VMA_DEDICATED_ALLOCATION
    12249  {
    12250  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12251  requiresDedicatedAllocation = false;
    12252  prefersDedicatedAllocation = false;
    12253  }
    12254 }
    12255 
    12256 void VmaAllocator_T::GetImageMemoryRequirements(
    12257  VkImage hImage,
    12258  VkMemoryRequirements& memReq,
    12259  bool& requiresDedicatedAllocation,
    12260  bool& prefersDedicatedAllocation) const
    12261 {
    12262 #if VMA_DEDICATED_ALLOCATION
    12263  if(m_UseKhrDedicatedAllocation)
    12264  {
    12265  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12266  memReqInfo.image = hImage;
    12267 
    12268  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12269 
    12270  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12271  memReq2.pNext = &memDedicatedReq;
    12272 
    12273  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12274 
    12275  memReq = memReq2.memoryRequirements;
    12276  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12277  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12278  }
    12279  else
    12280 #endif // #if VMA_DEDICATED_ALLOCATION
    12281  {
    12282  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12283  requiresDedicatedAllocation = false;
    12284  prefersDedicatedAllocation = false;
    12285  }
    12286 }
    12287 
    12288 VkResult VmaAllocator_T::AllocateMemory(
    12289  const VkMemoryRequirements& vkMemReq,
    12290  bool requiresDedicatedAllocation,
    12291  bool prefersDedicatedAllocation,
    12292  VkBuffer dedicatedBuffer,
    12293  VkImage dedicatedImage,
    12294  const VmaAllocationCreateInfo& createInfo,
    12295  VmaSuballocationType suballocType,
    12296  VmaAllocation* pAllocation)
    12297 {
    12298  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12299 
    12300  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12301  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12302  {
    12303  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12304  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12305  }
    12306  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12308  {
    12309  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12310  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12311  }
    12312  if(requiresDedicatedAllocation)
    12313  {
    12314  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12315  {
    12316  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12317  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12318  }
    12319  if(createInfo.pool != VK_NULL_HANDLE)
    12320  {
    12321  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12322  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12323  }
    12324  }
    12325  if((createInfo.pool != VK_NULL_HANDLE) &&
    12326  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12327  {
    12328  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12329  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12330  }
    12331 
    12332  if(createInfo.pool != VK_NULL_HANDLE)
    12333  {
    12334  const VkDeviceSize alignmentForPool = VMA_MAX(
    12335  vkMemReq.alignment,
    12336  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12337  return createInfo.pool->m_BlockVector.Allocate(
    12338  createInfo.pool,
    12339  m_CurrentFrameIndex.load(),
    12340  vkMemReq.size,
    12341  alignmentForPool,
    12342  createInfo,
    12343  suballocType,
    12344  pAllocation);
    12345  }
    12346  else
    12347  {
    12348  // Bit mask of memory Vulkan types acceptable for this allocation.
    12349  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12350  uint32_t memTypeIndex = UINT32_MAX;
    12351  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12352  if(res == VK_SUCCESS)
    12353  {
    12354  VkDeviceSize alignmentForMemType = VMA_MAX(
    12355  vkMemReq.alignment,
    12356  GetMemoryTypeMinAlignment(memTypeIndex));
    12357 
    12358  res = AllocateMemoryOfType(
    12359  vkMemReq.size,
    12360  alignmentForMemType,
    12361  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12362  dedicatedBuffer,
    12363  dedicatedImage,
    12364  createInfo,
    12365  memTypeIndex,
    12366  suballocType,
    12367  pAllocation);
    12368  // Succeeded on first try.
    12369  if(res == VK_SUCCESS)
    12370  {
    12371  return res;
    12372  }
    12373  // Allocation from this memory type failed. Try other compatible memory types.
    12374  else
    12375  {
    12376  for(;;)
    12377  {
    12378  // Remove old memTypeIndex from list of possibilities.
    12379  memoryTypeBits &= ~(1u << memTypeIndex);
    12380  // Find alternative memTypeIndex.
    12381  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12382  if(res == VK_SUCCESS)
    12383  {
    12384  alignmentForMemType = VMA_MAX(
    12385  vkMemReq.alignment,
    12386  GetMemoryTypeMinAlignment(memTypeIndex));
    12387 
    12388  res = AllocateMemoryOfType(
    12389  vkMemReq.size,
    12390  alignmentForMemType,
    12391  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12392  dedicatedBuffer,
    12393  dedicatedImage,
    12394  createInfo,
    12395  memTypeIndex,
    12396  suballocType,
    12397  pAllocation);
    12398  // Allocation from this alternative memory type succeeded.
    12399  if(res == VK_SUCCESS)
    12400  {
    12401  return res;
    12402  }
    12403  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12404  }
    12405  // No other matching memory type index could be found.
    12406  else
    12407  {
    12408  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12409  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12410  }
    12411  }
    12412  }
    12413  }
    12414  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12415  else
    12416  return res;
    12417  }
    12418 }
    12419 
    12420 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12421 {
    12422  VMA_ASSERT(allocation);
    12423 
    12424  if(TouchAllocation(allocation))
    12425  {
    12426  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12427  {
    12428  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12429  }
    12430 
    12431  switch(allocation->GetType())
    12432  {
    12433  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12434  {
    12435  VmaBlockVector* pBlockVector = VMA_NULL;
    12436  VmaPool hPool = allocation->GetPool();
    12437  if(hPool != VK_NULL_HANDLE)
    12438  {
    12439  pBlockVector = &hPool->m_BlockVector;
    12440  }
    12441  else
    12442  {
    12443  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12444  pBlockVector = m_pBlockVectors[memTypeIndex];
    12445  }
    12446  pBlockVector->Free(allocation);
    12447  }
    12448  break;
    12449  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12450  FreeDedicatedMemory(allocation);
    12451  break;
    12452  default:
    12453  VMA_ASSERT(0);
    12454  }
    12455  }
    12456 
    12457  allocation->SetUserData(this, VMA_NULL);
    12458  vma_delete(this, allocation);
    12459 }
    12460 
    12461 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12462 {
    12463  // Initialize.
    12464  InitStatInfo(pStats->total);
    12465  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12466  InitStatInfo(pStats->memoryType[i]);
    12467  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12468  InitStatInfo(pStats->memoryHeap[i]);
    12469 
    12470  // Process default pools.
    12471  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12472  {
    12473  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12474  VMA_ASSERT(pBlockVector);
    12475  pBlockVector->AddStats(pStats);
    12476  }
    12477 
    12478  // Process custom pools.
    12479  {
    12480  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12481  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12482  {
    12483  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12484  }
    12485  }
    12486 
    12487  // Process dedicated allocations.
    12488  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12489  {
    12490  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12491  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12492  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12493  VMA_ASSERT(pDedicatedAllocVector);
    12494  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12495  {
    12496  VmaStatInfo allocationStatInfo;
    12497  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12498  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12499  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12500  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12501  }
    12502  }
    12503 
    12504  // Postprocess.
    12505  VmaPostprocessCalcStatInfo(pStats->total);
    12506  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12507  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12508  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12509  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12510 }
    12511 
// PCI vendor ID of AMD (4098 == 0x1002).
// NOTE(review): its use is not visible in this chunk — presumably for
// vendor-specific tuning in defragmentation/benchmark paths; verify at call sites.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12513 
    12514 VkResult VmaAllocator_T::Defragment(
    12515  VmaAllocation* pAllocations,
    12516  size_t allocationCount,
    12517  VkBool32* pAllocationsChanged,
    12518  const VmaDefragmentationInfo* pDefragmentationInfo,
    12519  VmaDefragmentationStats* pDefragmentationStats)
    12520 {
    12521  if(pAllocationsChanged != VMA_NULL)
    12522  {
    12523  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    12524  }
    12525  if(pDefragmentationStats != VMA_NULL)
    12526  {
    12527  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12528  }
    12529 
    12530  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12531 
    12532  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12533 
    12534  const size_t poolCount = m_Pools.size();
    12535 
    12536  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12537  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12538  {
    12539  VmaAllocation hAlloc = pAllocations[allocIndex];
    12540  VMA_ASSERT(hAlloc);
    12541  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12542  // DedicatedAlloc cannot be defragmented.
    12543  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12544  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12545  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12546  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12547  // Lost allocation cannot be defragmented.
    12548  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12549  {
    12550  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12551 
    12552  const VmaPool hAllocPool = hAlloc->GetPool();
    12553  // This allocation belongs to custom pool.
    12554  if(hAllocPool != VK_NULL_HANDLE)
    12555  {
    12556  // Pools with linear or buddy algorithm are not defragmented.
    12557  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12558  {
    12559  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12560  }
    12561  }
    12562  // This allocation belongs to general pool.
    12563  else
    12564  {
    12565  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12566  }
    12567 
    12568  if(pAllocBlockVector != VMA_NULL)
    12569  {
    12570  VmaDefragmentator* const pDefragmentator =
    12571  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12572  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12573  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12574  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12575  }
    12576  }
    12577  }
    12578 
    12579  VkResult result = VK_SUCCESS;
    12580 
    12581  // ======== Main processing.
    12582 
    12583  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12584  uint32_t maxAllocationsToMove = UINT32_MAX;
    12585  if(pDefragmentationInfo != VMA_NULL)
    12586  {
    12587  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12588  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12589  }
    12590 
    12591  // Process standard memory.
    12592  for(uint32_t memTypeIndex = 0;
    12593  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12594  ++memTypeIndex)
    12595  {
    12596  // Only HOST_VISIBLE memory types can be defragmented.
    12597  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12598  {
    12599  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12600  pDefragmentationStats,
    12601  maxBytesToMove,
    12602  maxAllocationsToMove);
    12603  }
    12604  }
    12605 
    12606  // Process custom pools.
    12607  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12608  {
    12609  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12610  pDefragmentationStats,
    12611  maxBytesToMove,
    12612  maxAllocationsToMove);
    12613  }
    12614 
    12615  // ======== Destroy defragmentators.
    12616 
    12617  // Process custom pools.
    12618  for(size_t poolIndex = poolCount; poolIndex--; )
    12619  {
    12620  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12621  }
    12622 
    12623  // Process standard memory.
    12624  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12625  {
    12626  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12627  {
    12628  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12629  }
    12630  }
    12631 
    12632  return result;
    12633 }
    12634 
/*
Fills *pAllocationInfo with current parameters of hAllocation.
For allocations that can become lost, this also "touches" the allocation:
its last-use frame index is advanced to the current frame via a lock-free CAS.
*/
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Retry until the allocation is observed either lost or already
        // stamped with the current frame index.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report placeholder values; only size and
                // user data remain meaningful.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Successfully touched in this frame: report real parameters.
                // Note pMappedData is VMA_NULL here: lost-capable allocations
                // cannot be mapped (see Map()).
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the frame index; on failure
                // CompareExchange updated localLastUseFrameIndex and we retry.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics-only touch: keep the last-use frame index current so
        // usage statistics reflect this query. Such allocation can never be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12706 
/*
Marks hAllocation as used in the current frame.
Returns false if the allocation is lost, true otherwise.
*/
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Lock-free retry loop: terminate once the allocation is observed
        // lost or stamped with the current frame index.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // On CAS failure localLastUseFrameIndex was refreshed; retry.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics-only touch for allocations that can never be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    12758 
/*
Creates a custom memory pool and registers it in m_Pools.
Returns VK_ERROR_INITIALIZATION_FAILED on inconsistent block counts, or the
error from creating the pool's minimum blocks (in which case the pool is
destroyed and *pPool set to null).
*/
VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
{
    VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);

    // Work on a local copy so defaults can be filled in without mutating caller data.
    VmaPoolCreateInfo newCreateInfo = *pCreateInfo;

    // maxBlockCount == 0 means "unlimited".
    if(newCreateInfo.maxBlockCount == 0)
    {
        newCreateInfo.maxBlockCount = SIZE_MAX;
    }
    if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);

    *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);

    // Pre-allocate minBlockCount blocks; roll back the pool on failure.
    VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    if(res != VK_SUCCESS)
    {
        vma_delete(this, *pPool);
        *pPool = VMA_NULL;
        return res;
    }

    // Add to m_Pools.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        (*pPool)->SetId(m_NextPoolId++);
        VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    }

    return VK_SUCCESS;
}
    12795 
    12796 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12797 {
    12798  // Remove from m_Pools.
    12799  {
    12800  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12801  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    12802  VMA_ASSERT(success && "Pool not found in Allocator.");
    12803  }
    12804 
    12805  vma_delete(this, pool);
    12806 }
    12807 
// Fills *pPoolStats with statistics of the given custom pool.
// Thin delegation to the pool's block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    12812 
// Atomically publishes the current frame index, used by the lost-allocation
// and touch logic throughout the allocator.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    12817 
// Marks eligible allocations in the given pool as lost, as of the current
// frame index. pLostAllocationCount (optional) receives how many were lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    12826 
// Validates margin bytes of all allocations in the given custom pool.
// Delegates to the pool's block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    12831 
    12832 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12833 {
    12834  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12835 
    12836  // Process default pools.
    12837  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12838  {
    12839  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12840  {
    12841  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12842  VMA_ASSERT(pBlockVector);
    12843  VkResult localRes = pBlockVector->CheckCorruption();
    12844  switch(localRes)
    12845  {
    12846  case VK_ERROR_FEATURE_NOT_PRESENT:
    12847  break;
    12848  case VK_SUCCESS:
    12849  finalRes = VK_SUCCESS;
    12850  break;
    12851  default:
    12852  return localRes;
    12853  }
    12854  }
    12855  }
    12856 
    12857  // Process custom pools.
    12858  {
    12859  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12860  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12861  {
    12862  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12863  {
    12864  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12865  switch(localRes)
    12866  {
    12867  case VK_ERROR_FEATURE_NOT_PRESENT:
    12868  break;
    12869  case VK_SUCCESS:
    12870  finalRes = VK_SUCCESS;
    12871  break;
    12872  default:
    12873  return localRes;
    12874  }
    12875  }
    12876  }
    12877  }
    12878 
    12879  return finalRes;
    12880 }
    12881 
// Creates a dummy allocation that is permanently in the "lost" state,
// usable as a placeholder handle by the application.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    12887 
/*
Calls vkAllocateMemory, enforcing the optional per-heap size limit and
notifying the user's device-memory allocation callback on success.
Returns VK_ERROR_OUT_OF_DEVICE_MEMORY when the heap limit would be exceeded.
*/
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit for this heap".
    // Whether a heap is limited never changes after initialization, so this
    // check is safe outside the mutex; the remaining budget is mutated only
    // under m_HeapSizeLimitMutex below.
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                // Charge the allocation against the remaining heap budget.
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Inform the user via the optional callback.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    12921 
/*
Calls vkFreeMemory, notifying the user's free callback first and returning
the freed size to the per-heap budget if a heap size limit is in effect.
*/
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Callback is invoked before the memory is actually freed, while hMemory is still valid.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Return the size to the heap budget (VK_WHOLE_SIZE = no limit configured).
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    12938 
/*
Maps the allocation's memory and returns a pointer to its first byte in *ppData.
Allocations that can become lost cannot be mapped.
Mapping is reference-counted: each successful Map() must be paired with Unmap().
*/
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        // The whole block is mapped (ref-counted); the returned pointer is
        // offset to this allocation within the block.
        VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
        char *pBytes = VMA_NULL;
        VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
        if(res == VK_SUCCESS)
        {
            *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
            // Bump this allocation's own map counter only after a successful block map.
            hAllocation->BlockAllocMap();
        }
        return res;
    }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
    12967 
/*
Unmaps the allocation's memory, decrementing the map reference counts
established by Map().
*/
void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
{
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
        // Decrement the allocation's own map counter first (mirrors the order
        // in Map(), where it is incremented last), then the block's counter.
        hAllocation->BlockAllocUnmap();
        pBlock->Unmap(this, 1);
    }
    break;
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        hAllocation->DedicatedAllocUnmap(this);
        break;
    default:
        VMA_ASSERT(0);
    }
}
    12986 
    12987 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    12988 {
    12989  VkResult res = VK_SUCCESS;
    12990  switch(hAllocation->GetType())
    12991  {
    12992  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12993  res = GetVulkanFunctions().vkBindBufferMemory(
    12994  m_hDevice,
    12995  hBuffer,
    12996  hAllocation->GetMemory(),
    12997  0); //memoryOffset
    12998  break;
    12999  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13000  {
    13001  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13002  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13003  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13004  break;
    13005  }
    13006  default:
    13007  VMA_ASSERT(0);
    13008  }
    13009  return res;
    13010 }
    13011 
    13012 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13013 {
    13014  VkResult res = VK_SUCCESS;
    13015  switch(hAllocation->GetType())
    13016  {
    13017  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13018  res = GetVulkanFunctions().vkBindImageMemory(
    13019  m_hDevice,
    13020  hImage,
    13021  hAllocation->GetMemory(),
    13022  0); //memoryOffset
    13023  break;
    13024  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13025  {
    13026  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13027  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13028  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13029  break;
    13030  }
    13031  default:
    13032  VMA_ASSERT(0);
    13033  }
    13034  return res;
    13035 }
    13036 
/*
Flushes or invalidates a byte range [offset, offset+size) of the allocation's
host-visible memory. No-op for coherent memory types or size == 0.
The range is expanded to multiples of nonCoherentAtomSize as the Vulkan spec
requires for VkMappedMemoryRange, and clamped to the allocation/block.
op selects vkFlushMappedMemoryRanges vs vkInvalidateMappedMemoryRanges.
*/
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Allocation occupies the whole VkDeviceMemory: align the start
            // down and the end up to the atom size, clamped to the allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            // Grow size to compensate for aligning the start down, then align the end up.
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Sub-allocation offsets are atom-aligned, so translating by
            // allocationOffset preserves the alignment of memRange.offset.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13112 
/*
Frees a dedicated allocation: unregisters it from the per-memory-type
dedicated-allocation list and releases its VkDeviceMemory.
*/
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Unregister under the per-type mutex; the actual free happens outside the lock.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    13142 
    13143 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13144 {
    13145  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13146  !hAllocation->CanBecomeLost() &&
    13147  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13148  {
    13149  void* pData = VMA_NULL;
    13150  VkResult res = Map(hAllocation, &pData);
    13151  if(res == VK_SUCCESS)
    13152  {
    13153  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13154  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13155  Unmap(hAllocation);
    13156  }
    13157  else
    13158  {
    13159  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13160  }
    13161  }
    13162 }
    13163 
    13164 #if VMA_STATS_STRING_ENABLED
    13165 
/*
Writes the detailed memory map into the JSON writer: dedicated allocations
grouped by memory type, then default per-type block vectors, then custom
pools keyed by pool ID. Called from vmaBuildStatsString with an open object.
*/
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // "DedicatedAllocations" object is opened lazily, only if at least one
    // memory type has dedicated allocations.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools" object, also opened lazily for non-empty block vectors only.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Pools are keyed by their numeric ID.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13251 
    13252 #endif // #if VMA_STATS_STRING_ENABLED
    13253 
    13255 // Public interface
    13256 
// Public API: creates the allocator object using the user's (optional)
// CPU allocation callbacks, then runs its two-phase initialization.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13266 
// Public API: destroys the allocator. Null handle is a safe no-op.
void vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        // Copy the callbacks first: they live inside the allocator object,
        // which is about to be destroyed and must not be read during deletion.
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
        vma_delete(&allocationCallbacks, allocator);
    }
}
    13277 
    13279  VmaAllocator allocator,
    13280  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13281 {
    13282  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13283  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13284 }
    13285 
    13287  VmaAllocator allocator,
    13288  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13289 {
    13290  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13291  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13292 }
    13293 
    13295  VmaAllocator allocator,
    13296  uint32_t memoryTypeIndex,
    13297  VkMemoryPropertyFlags* pFlags)
    13298 {
    13299  VMA_ASSERT(allocator && pFlags);
    13300  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13301  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13302 }
    13303 
    13305  VmaAllocator allocator,
    13306  uint32_t frameIndex)
    13307 {
    13308  VMA_ASSERT(allocator);
    13309  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13310 
    13311  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13312 
    13313  allocator->SetCurrentFrameIndex(frameIndex);
    13314 }
    13315 
// Public API: computes aggregate statistics over all memory types, heaps
// and pools of the allocator into *pStats.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13324 
    13325 #if VMA_STATS_STRING_ENABLED
    13326 
/*
Public API: builds a JSON string describing the allocator's current state:
total stats, then per-heap (size, flags, stats) with nested per-type entries,
and optionally the detailed memory map when detailedMap is VK_TRUE.
*ppStatsString receives a NUL-terminated string that the caller must release
with vmaFreeStatsString().
*/
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Inner scope so the writer flushes into sb before the string is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap-level stats only when the heap actually holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest each memory type under the heap it belongs to.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's buffer into a caller-owned, NUL-terminated string.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13434 
    13435 void vmaFreeStatsString(
    13436  VmaAllocator allocator,
    13437  char* pStatsString)
    13438 {
    13439  if(pStatsString != VMA_NULL)
    13440  {
    13441  VMA_ASSERT(allocator);
    13442  size_t len = strlen(pStatsString);
    13443  vma_delete_array(allocator, pStatsString, len + 1);
    13444  }
    13445 }
    13446 
    13447 #endif // #if VMA_STATS_STRING_ENABLED
    13448 
    13449 /*
    13450 This function is not protected by any mutex because it just reads immutable data.
    13451 */
    13452 VkResult vmaFindMemoryTypeIndex(
    13453  VmaAllocator allocator,
    13454  uint32_t memoryTypeBits,
    13455  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13456  uint32_t* pMemoryTypeIndex)
    13457 {
    13458  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13459  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13460  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13461 
    13462  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13463  {
    13464  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13465  }
    13466 
    13467  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13468  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13469 
    13470  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13471  if(mapped)
    13472  {
    13473  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13474  }
    13475 
    13476  // Convert usage to requiredFlags and preferredFlags.
    13477  switch(pAllocationCreateInfo->usage)
    13478  {
    13480  break;
    13482  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13483  {
    13484  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13485  }
    13486  break;
    13488  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13489  break;
    13491  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13492  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13493  {
    13494  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13495  }
    13496  break;
    13498  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13499  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13500  break;
    13501  default:
    13502  break;
    13503  }
    13504 
    13505  *pMemoryTypeIndex = UINT32_MAX;
    13506  uint32_t minCost = UINT32_MAX;
    13507  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13508  memTypeIndex < allocator->GetMemoryTypeCount();
    13509  ++memTypeIndex, memTypeBit <<= 1)
    13510  {
    13511  // This memory type is acceptable according to memoryTypeBits bitmask.
    13512  if((memTypeBit & memoryTypeBits) != 0)
    13513  {
    13514  const VkMemoryPropertyFlags currFlags =
    13515  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13516  // This memory type contains requiredFlags.
    13517  if((requiredFlags & ~currFlags) == 0)
    13518  {
    13519  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13520  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13521  // Remember memory type with lowest cost.
    13522  if(currCost < minCost)
    13523  {
    13524  *pMemoryTypeIndex = memTypeIndex;
    13525  if(currCost == 0)
    13526  {
    13527  return VK_SUCCESS;
    13528  }
    13529  minCost = currCost;
    13530  }
    13531  }
    13532  }
    13533  }
    13534  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13535 }
    13536 
    13538  VmaAllocator allocator,
    13539  const VkBufferCreateInfo* pBufferCreateInfo,
    13540  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13541  uint32_t* pMemoryTypeIndex)
    13542 {
    13543  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13544  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13545  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13546  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13547 
    13548  const VkDevice hDev = allocator->m_hDevice;
    13549  VkBuffer hBuffer = VK_NULL_HANDLE;
    13550  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13551  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13552  if(res == VK_SUCCESS)
    13553  {
    13554  VkMemoryRequirements memReq = {};
    13555  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13556  hDev, hBuffer, &memReq);
    13557 
    13558  res = vmaFindMemoryTypeIndex(
    13559  allocator,
    13560  memReq.memoryTypeBits,
    13561  pAllocationCreateInfo,
    13562  pMemoryTypeIndex);
    13563 
    13564  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13565  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13566  }
    13567  return res;
    13568 }
    13569 
    13571  VmaAllocator allocator,
    13572  const VkImageCreateInfo* pImageCreateInfo,
    13573  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13574  uint32_t* pMemoryTypeIndex)
    13575 {
    13576  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13577  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13578  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13579  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13580 
    13581  const VkDevice hDev = allocator->m_hDevice;
    13582  VkImage hImage = VK_NULL_HANDLE;
    13583  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13584  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13585  if(res == VK_SUCCESS)
    13586  {
    13587  VkMemoryRequirements memReq = {};
    13588  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13589  hDev, hImage, &memReq);
    13590 
    13591  res = vmaFindMemoryTypeIndex(
    13592  allocator,
    13593  memReq.memoryTypeBits,
    13594  pAllocationCreateInfo,
    13595  pMemoryTypeIndex);
    13596 
    13597  allocator->GetVulkanFunctions().vkDestroyImage(
    13598  hDev, hImage, allocator->GetAllocationCallbacks());
    13599  }
    13600  return res;
    13601 }
    13602 
// Creates a custom memory pool. Thin public wrapper: validates arguments,
// takes the optional global debug mutex, delegates to the allocator object,
// and records the call when recording is enabled.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    // No trailing semicolon: the macro expands to a complete statement (or nothing).
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    // NOTE(review): recorded even when CreatePool failed — assumes the recorder
    // tolerates *pPool being null in that case; confirm against Recorder code.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
    13625 
// Destroys a custom memory pool. Destroying VK_NULL_HANDLE is a no-op.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before the pool handle is invalidated by DestroyPool below.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
    13650 
// Retrieves statistics of an existing custom pool into *pPoolStats.
// All three arguments are required (asserted, not validated at runtime).
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
    13662 
    13664  VmaAllocator allocator,
    13665  VmaPool pool,
    13666  size_t* pLostAllocationCount)
    13667 {
    13668  VMA_ASSERT(allocator && pool);
    13669 
    13670  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13671 
    13672 #if VMA_RECORDING_ENABLED
    13673  if(allocator->GetRecorder() != VMA_NULL)
    13674  {
    13675  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13676  }
    13677 #endif
    13678 
    13679  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13680 }
    13681 
    13682 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13683 {
    13684  VMA_ASSERT(allocator && pool);
    13685 
    13686  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13687 
    13688  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13689 
    13690  return allocator->CheckPoolCorruption(pool);
    13691 }
    13692 
// Allocates device memory for the given VkMemoryRequirements, not tied to any
// particular buffer or image. pAllocationInfo is optional and is filled only
// when the allocation succeeds.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // No VkBuffer/VkImage is known here, so a dedicated allocation cannot be
    // requested or preferred: both flags false, both handles null.
    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13734 
    13736  VmaAllocator allocator,
    13737  VkBuffer buffer,
    13738  const VmaAllocationCreateInfo* pCreateInfo,
    13739  VmaAllocation* pAllocation,
    13740  VmaAllocationInfo* pAllocationInfo)
    13741 {
    13742  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13743 
    13744  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13745 
    13746  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13747 
    13748  VkMemoryRequirements vkMemReq = {};
    13749  bool requiresDedicatedAllocation = false;
    13750  bool prefersDedicatedAllocation = false;
    13751  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    13752  requiresDedicatedAllocation,
    13753  prefersDedicatedAllocation);
    13754 
    13755  VkResult result = allocator->AllocateMemory(
    13756  vkMemReq,
    13757  requiresDedicatedAllocation,
    13758  prefersDedicatedAllocation,
    13759  buffer, // dedicatedBuffer
    13760  VK_NULL_HANDLE, // dedicatedImage
    13761  *pCreateInfo,
    13762  VMA_SUBALLOCATION_TYPE_BUFFER,
    13763  pAllocation);
    13764 
    13765 #if VMA_RECORDING_ENABLED
    13766  if(allocator->GetRecorder() != VMA_NULL)
    13767  {
    13768  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    13769  allocator->GetCurrentFrameIndex(),
    13770  vkMemReq,
    13771  requiresDedicatedAllocation,
    13772  prefersDedicatedAllocation,
    13773  *pCreateInfo,
    13774  *pAllocation);
    13775  }
    13776 #endif
    13777 
    13778  if(pAllocationInfo && result == VK_SUCCESS)
    13779  {
    13780  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13781  }
    13782 
    13783  return result;
    13784 }
    13785 
// Allocates memory suitable for the given existing VkImage, honoring its
// memory requirements and dedicated-allocation hints. Does NOT bind the
// memory. pAllocationInfo is optional and filled only on success.
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    // Tiling is unknown at this point, hence IMAGE_UNKNOWN suballocation type.
    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13835 
// Frees memory previously allocated by one of the vmaAllocateMemory* /
// vmaCreate* functions. Freeing VK_NULL_HANDLE is a no-op.
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before FreeMemory invalidates the allocation handle.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->FreeMemory(allocation);
}
    13862 
    13864  VmaAllocator allocator,
    13865  VmaAllocation allocation,
    13866  VmaAllocationInfo* pAllocationInfo)
    13867 {
    13868  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    13869 
    13870  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13871 
    13872 #if VMA_RECORDING_ENABLED
    13873  if(allocator->GetRecorder() != VMA_NULL)
    13874  {
    13875  allocator->GetRecorder()->RecordGetAllocationInfo(
    13876  allocator->GetCurrentFrameIndex(),
    13877  allocation);
    13878  }
    13879 #endif
    13880 
    13881  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    13882 }
    13883 
// Touches the allocation to mark it as used in the current frame and returns
// whether it is still valid (VK_TRUE) or has become lost (VK_FALSE).
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordTouchAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return allocator->TouchAllocation(allocation);
}
    13903 
    13905  VmaAllocator allocator,
    13906  VmaAllocation allocation,
    13907  void* pUserData)
    13908 {
    13909  VMA_ASSERT(allocator && allocation);
    13910 
    13911  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13912 
    13913  allocation->SetUserData(allocator, pUserData);
    13914 
    13915 #if VMA_RECORDING_ENABLED
    13916  if(allocator->GetRecorder() != VMA_NULL)
    13917  {
    13918  allocator->GetRecorder()->RecordSetAllocationUserData(
    13919  allocator->GetCurrentFrameIndex(),
    13920  allocation,
    13921  pUserData);
    13922  }
    13923 #endif
    13924 }
    13925 
    13927  VmaAllocator allocator,
    13928  VmaAllocation* pAllocation)
    13929 {
    13930  VMA_ASSERT(allocator && pAllocation);
    13931 
    13932  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    13933 
    13934  allocator->CreateLostAllocation(pAllocation);
    13935 
    13936 #if VMA_RECORDING_ENABLED
    13937  if(allocator->GetRecorder() != VMA_NULL)
    13938  {
    13939  allocator->GetRecorder()->RecordCreateLostAllocation(
    13940  allocator->GetCurrentFrameIndex(),
    13941  *pAllocation);
    13942  }
    13943 #endif
    13944 }
    13945 
// Maps the allocation's memory and returns the pointer via *ppData.
// Delegates to the allocator's Map, which handles reference-counted mapping
// of the underlying VkDeviceMemory block.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->Map(allocation, ppData);

#if VMA_RECORDING_ENABLED
    // NOTE(review): recorded even if Map failed — assumes the recorder
    // tolerates that; confirm against Recorder code.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
    13968 
// Unmaps memory previously mapped with vmaMapMemory (reference-counted;
// must be balanced with the corresponding Map call).
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordUnmapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->Unmap(allocation);
}
    13988 
// Flushes the given byte range [offset, offset+size) of the allocation's
// mapped memory (wraps vkFlushMappedMemoryRanges for non-coherent memory).
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFlushAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    14008 
// Invalidates the given byte range [offset, offset+size) of the allocation's
// mapped memory (wraps vkInvalidateMappedMemoryRanges for non-coherent memory).
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordInvalidateAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    14028 
// Checks magic-number margins of all allocations in memory types matching
// the memoryTypeBits mask. Pass UINT32_MAX to check all memory types.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaCheckCorruption");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CheckCorruption(memoryTypeBits);
}
    14039 
// Compacts the given allocations to reduce fragmentation.
// pAllocationsChanged (optional) receives per-allocation "was moved" flags;
// pDefragmentationInfo and pDefragmentationStats are also optional.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    VMA_ASSERT(allocator && pAllocations);

    VMA_DEBUG_LOG("vmaDefragment");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
}
    14056 
// Binds the given buffer to the allocation's memory at the allocation's offset
// (wraps vkBindBufferMemory with proper synchronization inside the allocator).
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, buffer);
}
    14070 
// Binds the given image to the allocation's memory at the allocation's offset
// (wraps vkBindImageMemory with proper synchronization inside the allocator).
VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, image);
}
    14084 
// Creates a VkBuffer, allocates memory for it, and binds them together.
// On any failure, everything created so far is rolled back and both *pBuffer
// and *pAllocation are left as VK_NULL_HANDLE. pAllocationInfo is optional.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Pre-null outputs so callers see consistent state on every error path.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory. (Was mislabeled "3." — renumbered.)
            res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back allocation and buffer.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
    14186 
// Destroys a buffer and frees its allocation in one call — the counterpart of
// vmaCreateBuffer. Either handle may be VK_NULL_HANDLE; a call with both null
// is a no-op.
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyBuffer(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    // Destroy the buffer before freeing the memory it was bound to.
    if(buffer != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    }

    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(allocation);
    }
}
    14222 
// Creates a VkImage, allocates memory for it, and binds them together.
// On any failure, everything created so far is rolled back and both *pImage
// and *pAllocation are left as VK_NULL_HANDLE. pAllocationInfo is optional.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Pre-null outputs so callers see consistent state on every error path.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Tiling is known here, so linear and optimal images get distinct
        // suballocation types (relevant for aliasing/granularity rules).
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = allocator->BindImageMemory(*pAllocation, *pImage);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back allocation and image.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back image.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
    14309 
// Destroys an image and frees its allocation in one call — the counterpart of
// vmaCreateImage. Either handle may be VK_NULL_HANDLE; a call with both null
// is a no-op.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyImage(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    // Destroy the image before freeing the memory it was bound to.
    if(image != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    }
    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(allocation);
    }
}
    14344 
    14345 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1571
    -
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1872
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1468 /*
    1469 Define this macro to 0/1 to disable/enable support for recording functionality,
    1470 available through VmaAllocatorCreateInfo::pRecordSettings.
    1471 */
    1472 #ifndef VMA_RECORDING_ENABLED
    1473  #ifdef _WIN32
    1474  #define VMA_RECORDING_ENABLED 1
    1475  #else
    1476  #define VMA_RECORDING_ENABLED 0
    1477  #endif
    1478 #endif
    1479 
    1480 #ifndef NOMINMAX
    1481  #define NOMINMAX // For windows.h
    1482 #endif
    1483 
    1484 #include <vulkan/vulkan.h>
    1485 
    1486 #if VMA_RECORDING_ENABLED
    1487  #include <windows.h>
    1488 #endif
    1489 
    1490 #if !defined(VMA_DEDICATED_ALLOCATION)
    1491  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1492  #define VMA_DEDICATED_ALLOCATION 1
    1493  #else
    1494  #define VMA_DEDICATED_ALLOCATION 0
    1495  #endif
    1496 #endif
    1497 
    1507 VK_DEFINE_HANDLE(VmaAllocator)
    1508 
    1509 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1511  VmaAllocator allocator,
    1512  uint32_t memoryType,
    1513  VkDeviceMemory memory,
    1514  VkDeviceSize size);
    1516 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1517  VmaAllocator allocator,
    1518  uint32_t memoryType,
    1519  VkDeviceMemory memory,
    1520  VkDeviceSize size);
    1521 
    1535 
    1565 
    1568 typedef VkFlags VmaAllocatorCreateFlags;
    1569 
    1574 typedef struct VmaVulkanFunctions {
    1575  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1576  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1577  PFN_vkAllocateMemory vkAllocateMemory;
    1578  PFN_vkFreeMemory vkFreeMemory;
    1579  PFN_vkMapMemory vkMapMemory;
    1580  PFN_vkUnmapMemory vkUnmapMemory;
    1581  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1582  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1583  PFN_vkBindBufferMemory vkBindBufferMemory;
    1584  PFN_vkBindImageMemory vkBindImageMemory;
    1585  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1586  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1587  PFN_vkCreateBuffer vkCreateBuffer;
    1588  PFN_vkDestroyBuffer vkDestroyBuffer;
    1589  PFN_vkCreateImage vkCreateImage;
    1590  PFN_vkDestroyImage vkDestroyImage;
    1591 #if VMA_DEDICATED_ALLOCATION
    1592  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1593  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1594 #endif
    1596 
    1598 typedef enum VmaRecordFlagBits {
    1605 
    1608 typedef VkFlags VmaRecordFlags;
    1609 
    1611 typedef struct VmaRecordSettings
    1612 {
    1622  const char* pFilePath;
    1624 
    1627 {
    1631 
    1632  VkPhysicalDevice physicalDevice;
    1634 
    1635  VkDevice device;
    1637 
    1640 
    1641  const VkAllocationCallbacks* pAllocationCallbacks;
    1643 
    1682  const VkDeviceSize* pHeapSizeLimit;
    1703 
    1705 VkResult vmaCreateAllocator(
    1706  const VmaAllocatorCreateInfo* pCreateInfo,
    1707  VmaAllocator* pAllocator);
    1708 
    1710 void vmaDestroyAllocator(
    1711  VmaAllocator allocator);
    1712 
    1718  VmaAllocator allocator,
    1719  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1720 
    1726  VmaAllocator allocator,
    1727  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1728 
    1736  VmaAllocator allocator,
    1737  uint32_t memoryTypeIndex,
    1738  VkMemoryPropertyFlags* pFlags);
    1739 
    1749  VmaAllocator allocator,
    1750  uint32_t frameIndex);
    1751 
    1754 typedef struct VmaStatInfo
    1755 {
    1757  uint32_t blockCount;
    1763  VkDeviceSize usedBytes;
    1765  VkDeviceSize unusedBytes;
    1768 } VmaStatInfo;
    1769 
    1771 typedef struct VmaStats
    1772 {
    1773  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1774  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1776 } VmaStats;
    1777 
    1779 void vmaCalculateStats(
    1780  VmaAllocator allocator,
    1781  VmaStats* pStats);
    1782 
    1783 #define VMA_STATS_STRING_ENABLED 1
    1784 
    1785 #if VMA_STATS_STRING_ENABLED
    1786 
    1788 
    1790 void vmaBuildStatsString(
    1791  VmaAllocator allocator,
    1792  char** ppStatsString,
    1793  VkBool32 detailedMap);
    1794 
    1795 void vmaFreeStatsString(
    1796  VmaAllocator allocator,
    1797  char* pStatsString);
    1798 
    1799 #endif // #if VMA_STATS_STRING_ENABLED
    1800 
    1809 VK_DEFINE_HANDLE(VmaPool)
    1810 
    1811 typedef enum VmaMemoryUsage
    1812 {
    1861 } VmaMemoryUsage;
    1862 
    1877 
    1932 
    1945 
    1955 
    1962 
    1966 
    1968 {
    1981  VkMemoryPropertyFlags requiredFlags;
    1986  VkMemoryPropertyFlags preferredFlags;
    1994  uint32_t memoryTypeBits;
    2007  void* pUserData;
    2009 
    2026 VkResult vmaFindMemoryTypeIndex(
    2027  VmaAllocator allocator,
    2028  uint32_t memoryTypeBits,
    2029  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2030  uint32_t* pMemoryTypeIndex);
    2031 
    2045  VmaAllocator allocator,
    2046  const VkBufferCreateInfo* pBufferCreateInfo,
    2047  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2048  uint32_t* pMemoryTypeIndex);
    2049 
    2063  VmaAllocator allocator,
    2064  const VkImageCreateInfo* pImageCreateInfo,
    2065  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2066  uint32_t* pMemoryTypeIndex);
    2067 
    2088 
    2105 
    2116 
    2122 
    2125 typedef VkFlags VmaPoolCreateFlags;
    2126 
    2129 typedef struct VmaPoolCreateInfo {
    2144  VkDeviceSize blockSize;
    2173 
    2176 typedef struct VmaPoolStats {
    2179  VkDeviceSize size;
    2182  VkDeviceSize unusedSize;
    2195  VkDeviceSize unusedRangeSizeMax;
    2198  size_t blockCount;
    2199 } VmaPoolStats;
    2200 
    2207 VkResult vmaCreatePool(
    2208  VmaAllocator allocator,
    2209  const VmaPoolCreateInfo* pCreateInfo,
    2210  VmaPool* pPool);
    2211 
    2214 void vmaDestroyPool(
    2215  VmaAllocator allocator,
    2216  VmaPool pool);
    2217 
    2224 void vmaGetPoolStats(
    2225  VmaAllocator allocator,
    2226  VmaPool pool,
    2227  VmaPoolStats* pPoolStats);
    2228 
    2236  VmaAllocator allocator,
    2237  VmaPool pool,
    2238  size_t* pLostAllocationCount);
    2239 
    2254 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2255 
    2280 VK_DEFINE_HANDLE(VmaAllocation)
    2281 
    2282 
    2284 typedef struct VmaAllocationInfo {
    2289  uint32_t memoryType;
    2298  VkDeviceMemory deviceMemory;
    2303  VkDeviceSize offset;
    2308  VkDeviceSize size;
    2322  void* pUserData;
    2324 
    2335 VkResult vmaAllocateMemory(
    2336  VmaAllocator allocator,
    2337  const VkMemoryRequirements* pVkMemoryRequirements,
    2338  const VmaAllocationCreateInfo* pCreateInfo,
    2339  VmaAllocation* pAllocation,
    2340  VmaAllocationInfo* pAllocationInfo);
    2341 
    2349  VmaAllocator allocator,
    2350  VkBuffer buffer,
    2351  const VmaAllocationCreateInfo* pCreateInfo,
    2352  VmaAllocation* pAllocation,
    2353  VmaAllocationInfo* pAllocationInfo);
    2354 
    2356 VkResult vmaAllocateMemoryForImage(
    2357  VmaAllocator allocator,
    2358  VkImage image,
    2359  const VmaAllocationCreateInfo* pCreateInfo,
    2360  VmaAllocation* pAllocation,
    2361  VmaAllocationInfo* pAllocationInfo);
    2362 
    2364 void vmaFreeMemory(
    2365  VmaAllocator allocator,
    2366  VmaAllocation allocation);
    2367 
    2385  VmaAllocator allocator,
    2386  VmaAllocation allocation,
    2387  VmaAllocationInfo* pAllocationInfo);
    2388 
    2403 VkBool32 vmaTouchAllocation(
    2404  VmaAllocator allocator,
    2405  VmaAllocation allocation);
    2406 
    2421  VmaAllocator allocator,
    2422  VmaAllocation allocation,
    2423  void* pUserData);
    2424 
    2436  VmaAllocator allocator,
    2437  VmaAllocation* pAllocation);
    2438 
    2473 VkResult vmaMapMemory(
    2474  VmaAllocator allocator,
    2475  VmaAllocation allocation,
    2476  void** ppData);
    2477 
    2482 void vmaUnmapMemory(
    2483  VmaAllocator allocator,
    2484  VmaAllocation allocation);
    2485 
    2498 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2499 
    2512 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2513 
    2530 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2531 
    2533 typedef struct VmaDefragmentationInfo {
    2538  VkDeviceSize maxBytesToMove;
    2545 
    2547 typedef struct VmaDefragmentationStats {
    2549  VkDeviceSize bytesMoved;
    2551  VkDeviceSize bytesFreed;
    2557 
    2596 VkResult vmaDefragment(
    2597  VmaAllocator allocator,
    2598  VmaAllocation* pAllocations,
    2599  size_t allocationCount,
    2600  VkBool32* pAllocationsChanged,
    2601  const VmaDefragmentationInfo *pDefragmentationInfo,
    2602  VmaDefragmentationStats* pDefragmentationStats);
    2603 
    2616 VkResult vmaBindBufferMemory(
    2617  VmaAllocator allocator,
    2618  VmaAllocation allocation,
    2619  VkBuffer buffer);
    2620 
    2633 VkResult vmaBindImageMemory(
    2634  VmaAllocator allocator,
    2635  VmaAllocation allocation,
    2636  VkImage image);
    2637 
    2664 VkResult vmaCreateBuffer(
    2665  VmaAllocator allocator,
    2666  const VkBufferCreateInfo* pBufferCreateInfo,
    2667  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2668  VkBuffer* pBuffer,
    2669  VmaAllocation* pAllocation,
    2670  VmaAllocationInfo* pAllocationInfo);
    2671 
    2683 void vmaDestroyBuffer(
    2684  VmaAllocator allocator,
    2685  VkBuffer buffer,
    2686  VmaAllocation allocation);
    2687 
    2689 VkResult vmaCreateImage(
    2690  VmaAllocator allocator,
    2691  const VkImageCreateInfo* pImageCreateInfo,
    2692  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2693  VkImage* pImage,
    2694  VmaAllocation* pAllocation,
    2695  VmaAllocationInfo* pAllocationInfo);
    2696 
    2708 void vmaDestroyImage(
    2709  VmaAllocator allocator,
    2710  VkImage image,
    2711  VmaAllocation allocation);
    2712 
    2713 #ifdef __cplusplus
    2714 }
    2715 #endif
    2716 
    2717 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2718 
    2719 // For Visual Studio IntelliSense.
    2720 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2721 #define VMA_IMPLEMENTATION
    2722 #endif
    2723 
    2724 #ifdef VMA_IMPLEMENTATION
    2725 #undef VMA_IMPLEMENTATION
    2726 
    2727 #include <cstdint>
    2728 #include <cstdlib>
    2729 #include <cstring>
    2730 
    2731 /*******************************************************************************
    2732 CONFIGURATION SECTION
    2733 
    2734 Define some of these macros before each #include of this header or change them
    2735 here if you need other then default behavior depending on your environment.
    2736 */
    2737 
    2738 /*
    2739 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2740 internally, like:
    2741 
    2742  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2743 
    2744 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2745 VmaAllocatorCreateInfo::pVulkanFunctions.
    2746 */
    2747 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2748 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2749 #endif
    2750 
    2751 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2752 //#define VMA_USE_STL_CONTAINERS 1
    2753 
    2754 /* Set this macro to 1 to make the library including and using STL containers:
    2755 std::pair, std::vector, std::list, std::unordered_map.
    2756 
    2757 Set it to 0 or undefined to make the library using its own implementation of
    2758 the containers.
    2759 */
    2760 #if VMA_USE_STL_CONTAINERS
    2761  #define VMA_USE_STL_VECTOR 1
    2762  #define VMA_USE_STL_UNORDERED_MAP 1
    2763  #define VMA_USE_STL_LIST 1
    2764 #endif
    2765 
    2766 #if VMA_USE_STL_VECTOR
    2767  #include <vector>
    2768 #endif
    2769 
    2770 #if VMA_USE_STL_UNORDERED_MAP
    2771  #include <unordered_map>
    2772 #endif
    2773 
    2774 #if VMA_USE_STL_LIST
    2775  #include <list>
    2776 #endif
    2777 
    2778 /*
    2779 Following headers are used in this CONFIGURATION section only, so feel free to
    2780 remove them if not needed.
    2781 */
    2782 #include <cassert> // for assert
    2783 #include <algorithm> // for min, max
    2784 #include <mutex> // for std::mutex
    2785 #include <atomic> // for std::atomic
    2786 
    2787 #ifndef VMA_NULL
    2788  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2789  #define VMA_NULL nullptr
    2790 #endif
    2791 
    2792 #if defined(__APPLE__) || defined(__ANDROID__)
    2793 #include <cstdlib>
    2794 void *aligned_alloc(size_t alignment, size_t size)
    2795 {
    2796  // alignment must be >= sizeof(void*)
    2797  if(alignment < sizeof(void*))
    2798  {
    2799  alignment = sizeof(void*);
    2800  }
    2801 
    2802  void *pointer;
    2803  if(posix_memalign(&pointer, alignment, size) == 0)
    2804  return pointer;
    2805  return VMA_NULL;
    2806 }
    2807 #endif
    2808 
    2809 // If your compiler is not compatible with C++11 and definition of
    2810 // aligned_alloc() function is missing, uncommeting following line may help:
    2811 
    2812 //#include <malloc.h>
    2813 
    2814 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2815 #ifndef VMA_ASSERT
    2816  #ifdef _DEBUG
    2817  #define VMA_ASSERT(expr) assert(expr)
    2818  #else
    2819  #define VMA_ASSERT(expr)
    2820  #endif
    2821 #endif
    2822 
    2823 // Assert that will be called very often, like inside data structures e.g. operator[].
    2824 // Making it non-empty can make program slow.
    2825 #ifndef VMA_HEAVY_ASSERT
    2826  #ifdef _DEBUG
    2827  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2828  #else
    2829  #define VMA_HEAVY_ASSERT(expr)
    2830  #endif
    2831 #endif
    2832 
    2833 #ifndef VMA_ALIGN_OF
    2834  #define VMA_ALIGN_OF(type) (__alignof(type))
    2835 #endif
    2836 
    2837 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2838  #if defined(_WIN32)
    2839  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2840  #else
    2841  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2842  #endif
    2843 #endif
    2844 
    2845 #ifndef VMA_SYSTEM_FREE
    2846  #if defined(_WIN32)
    2847  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2848  #else
    2849  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2850  #endif
    2851 #endif
    2852 
    2853 #ifndef VMA_MIN
    2854  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2855 #endif
    2856 
    2857 #ifndef VMA_MAX
    2858  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2859 #endif
    2860 
    2861 #ifndef VMA_SWAP
    2862  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2863 #endif
    2864 
    2865 #ifndef VMA_SORT
    2866  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2867 #endif
    2868 
    2869 #ifndef VMA_DEBUG_LOG
    2870  #define VMA_DEBUG_LOG(format, ...)
    2871  /*
    2872  #define VMA_DEBUG_LOG(format, ...) do { \
    2873  printf(format, __VA_ARGS__); \
    2874  printf("\n"); \
    2875  } while(false)
    2876  */
    2877 #endif
    2878 
    2879 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2880 #if VMA_STATS_STRING_ENABLED
    2881  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2882  {
    2883  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2884  }
    2885  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2886  {
    2887  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2888  }
    2889  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2890  {
    2891  snprintf(outStr, strLen, "%p", ptr);
    2892  }
    2893 #endif
    2894 
    2895 #ifndef VMA_MUTEX
    2896  class VmaMutex
    2897  {
    2898  public:
    2899  VmaMutex() { }
    2900  ~VmaMutex() { }
    2901  void Lock() { m_Mutex.lock(); }
    2902  void Unlock() { m_Mutex.unlock(); }
    2903  private:
    2904  std::mutex m_Mutex;
    2905  };
    2906  #define VMA_MUTEX VmaMutex
    2907 #endif
    2908 
    2909 /*
    2910 If providing your own implementation, you need to implement a subset of std::atomic:
    2911 
    2912 - Constructor(uint32_t desired)
    2913 - uint32_t load() const
    2914 - void store(uint32_t desired)
    2915 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2916 */
    2917 #ifndef VMA_ATOMIC_UINT32
    2918  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2919 #endif
    2920 
    2921 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2922 
    2926  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2927 #endif
    2928 
    2929 #ifndef VMA_DEBUG_ALIGNMENT
    2930 
    2934  #define VMA_DEBUG_ALIGNMENT (1)
    2935 #endif
    2936 
    2937 #ifndef VMA_DEBUG_MARGIN
    2938 
    2942  #define VMA_DEBUG_MARGIN (0)
    2943 #endif
    2944 
    2945 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2946 
    2950  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2951 #endif
    2952 
    2953 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2954 
    2959  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2960 #endif
    2961 
    2962 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2963 
    2967  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2968 #endif
    2969 
    2970 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2971 
    2975  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2976 #endif
    2977 
    2978 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2979  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2981 #endif
    2982 
    2983 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2984  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2986 #endif
    2987 
    2988 #ifndef VMA_CLASS_NO_COPY
    2989  #define VMA_CLASS_NO_COPY(className) \
    2990  private: \
    2991  className(const className&) = delete; \
    2992  className& operator=(const className&) = delete;
    2993 #endif
    2994 
    2995 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    2996 
    2997 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    2998 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    2999 
    3000 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3001 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3002 
    3003 /*******************************************************************************
    3004 END OF CONFIGURATION
    3005 */
    3006 
    3007 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3008  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3009 
    3010 // Returns number of bits set to 1 in (v).
    3011 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3012 {
    3013  uint32_t c = v - ((v >> 1) & 0x55555555);
    3014  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3015  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3016  c = ((c >> 8) + c) & 0x00FF00FF;
    3017  c = ((c >> 16) + c) & 0x0000FFFF;
    3018  return c;
    3019 }
    3020 
    3021 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3022 // Use types like uint32_t, uint64_t as T.
    3023 template <typename T>
    3024 static inline T VmaAlignUp(T val, T align)
    3025 {
    3026  return (val + align - 1) / align * align;
    3027 }
    3028 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3029 // Use types like uint32_t, uint64_t as T.
    3030 template <typename T>
    3031 static inline T VmaAlignDown(T val, T align)
    3032 {
    3033  return val / align * align;
    3034 }
    3035 
    3036 // Division with mathematical rounding to nearest number.
    3037 template <typename T>
    3038 static inline T VmaRoundDiv(T x, T y)
    3039 {
    3040  return (x + (y / (T)2)) / y;
    3041 }
    3042 
    3043 /*
    3044 Returns true if given number is a power of two.
    3045 T must be unsigned integer number or signed integer but always nonnegative.
    3046 For 0 returns true.
    3047 */
    3048 template <typename T>
    3049 inline bool VmaIsPow2(T x)
    3050 {
    3051  return (x & (x-1)) == 0;
    3052 }
    3053 
    3054 // Returns smallest power of 2 greater or equal to v.
    3055 static inline uint32_t VmaNextPow2(uint32_t v)
    3056 {
    3057  v--;
    3058  v |= v >> 1;
    3059  v |= v >> 2;
    3060  v |= v >> 4;
    3061  v |= v >> 8;
    3062  v |= v >> 16;
    3063  v++;
    3064  return v;
    3065 }
    3066 static inline uint64_t VmaNextPow2(uint64_t v)
    3067 {
    3068  v--;
    3069  v |= v >> 1;
    3070  v |= v >> 2;
    3071  v |= v >> 4;
    3072  v |= v >> 8;
    3073  v |= v >> 16;
    3074  v |= v >> 32;
    3075  v++;
    3076  return v;
    3077 }
    3078 
    3079 // Returns largest power of 2 less or equal to v.
    3080 static inline uint32_t VmaPrevPow2(uint32_t v)
    3081 {
    3082  v |= v >> 1;
    3083  v |= v >> 2;
    3084  v |= v >> 4;
    3085  v |= v >> 8;
    3086  v |= v >> 16;
    3087  v = v ^ (v >> 1);
    3088  return v;
    3089 }
    3090 static inline uint64_t VmaPrevPow2(uint64_t v)
    3091 {
    3092  v |= v >> 1;
    3093  v |= v >> 2;
    3094  v |= v >> 4;
    3095  v |= v >> 8;
    3096  v |= v >> 16;
    3097  v |= v >> 32;
    3098  v = v ^ (v >> 1);
    3099  return v;
    3100 }
    3101 
    3102 static inline bool VmaStrIsEmpty(const char* pStr)
    3103 {
    3104  return pStr == VMA_NULL || *pStr == '\0';
    3105 }
    3106 
    3107 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3108 {
    3109  switch(algorithm)
    3110  {
    3112  return "Linear";
    3114  return "Buddy";
    3115  case 0:
    3116  return "Default";
    3117  default:
    3118  VMA_ASSERT(0);
    3119  return "";
    3120  }
    3121 }
    3122 
    3123 #ifndef VMA_SORT
    3124 
    3125 template<typename Iterator, typename Compare>
    3126 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3127 {
    3128  Iterator centerValue = end; --centerValue;
    3129  Iterator insertIndex = beg;
    3130  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3131  {
    3132  if(cmp(*memTypeIndex, *centerValue))
    3133  {
    3134  if(insertIndex != memTypeIndex)
    3135  {
    3136  VMA_SWAP(*memTypeIndex, *insertIndex);
    3137  }
    3138  ++insertIndex;
    3139  }
    3140  }
    3141  if(insertIndex != centerValue)
    3142  {
    3143  VMA_SWAP(*insertIndex, *centerValue);
    3144  }
    3145  return insertIndex;
    3146 }
    3147 
    3148 template<typename Iterator, typename Compare>
    3149 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3150 {
    3151  if(beg < end)
    3152  {
    3153  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3154  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3155  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3156  }
    3157 }
    3158 
    3159 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3160 
    3161 #endif // #ifndef VMA_SORT
    3162 
    3163 /*
    3164 Returns true if two memory blocks occupy overlapping pages.
    3165 ResourceA must be in less memory offset than ResourceB.
    3166 
    3167 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3168 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3169 */
    3170 static inline bool VmaBlocksOnSamePage(
    3171  VkDeviceSize resourceAOffset,
    3172  VkDeviceSize resourceASize,
    3173  VkDeviceSize resourceBOffset,
    3174  VkDeviceSize pageSize)
    3175 {
    3176  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3177  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3178  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3179  VkDeviceSize resourceBStart = resourceBOffset;
    3180  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3181  return resourceAEndPage == resourceBStartPage;
    3182 }
    3183 
    3184 enum VmaSuballocationType
    3185 {
    3186  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3187  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3188  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3189  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3190  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3191  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3192  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3193 };
    3194 
    3195 /*
    3196 Returns true if given suballocation types could conflict and must respect
    3197 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3198 or linear image and another one is optimal image. If type is unknown, behave
    3199 conservatively.
    3200 */
    3201 static inline bool VmaIsBufferImageGranularityConflict(
    3202  VmaSuballocationType suballocType1,
    3203  VmaSuballocationType suballocType2)
    3204 {
    3205  if(suballocType1 > suballocType2)
    3206  {
    3207  VMA_SWAP(suballocType1, suballocType2);
    3208  }
    3209 
    3210  switch(suballocType1)
    3211  {
    3212  case VMA_SUBALLOCATION_TYPE_FREE:
    3213  return false;
    3214  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3215  return true;
    3216  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3217  return
    3218  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3219  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3220  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3221  return
    3222  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3223  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3224  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3225  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3226  return
    3227  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3228  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3229  return false;
    3230  default:
    3231  VMA_ASSERT(0);
    3232  return true;
    3233  }
    3234 }
    3235 
    3236 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3237 {
    3238  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3239  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3240  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3241  {
    3242  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3243  }
    3244 }
    3245 
    3246 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3247 {
    3248  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3249  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3250  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3251  {
    3252  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3253  {
    3254  return false;
    3255  }
    3256  }
    3257  return true;
    3258 }
    3259 
    3260 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3261 struct VmaMutexLock
    3262 {
    3263  VMA_CLASS_NO_COPY(VmaMutexLock)
    3264 public:
    3265  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3266  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3267  {
    3268  if(m_pMutex)
    3269  {
    3270  m_pMutex->Lock();
    3271  }
    3272  }
    3273 
    3274  ~VmaMutexLock()
    3275  {
    3276  if(m_pMutex)
    3277  {
    3278  m_pMutex->Unlock();
    3279  }
    3280  }
    3281 
    3282 private:
    3283  VMA_MUTEX* m_pMutex;
    3284 };
    3285 
    3286 #if VMA_DEBUG_GLOBAL_MUTEX
    3287  static VMA_MUTEX gDebugGlobalMutex;
    3288  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3289 #else
    3290  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3291 #endif
    3292 
    3293 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3294 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3295 
    3296 /*
    3297 Performs binary search and returns iterator to first element that is greater or
    3298 equal to (key), according to comparison (cmp).
    3299 
    3300 Cmp should return true if first argument is less than second argument.
    3301 
    3302 Returned value is the found element, if present in the collection or place where
    3303 new element with value (key) should be inserted.
    3304 */
    3305 template <typename CmpLess, typename IterT, typename KeyT>
    3306 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3307 {
    3308  size_t down = 0, up = (end - beg);
    3309  while(down < up)
    3310  {
    3311  const size_t mid = (down + up) / 2;
    3312  if(cmp(*(beg+mid), key))
    3313  {
    3314  down = mid + 1;
    3315  }
    3316  else
    3317  {
    3318  up = mid;
    3319  }
    3320  }
    3321  return beg + down;
    3322 }
    3323 
    3325 // Memory allocation
    3326 
    3327 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3328 {
    3329  if((pAllocationCallbacks != VMA_NULL) &&
    3330  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3331  {
    3332  return (*pAllocationCallbacks->pfnAllocation)(
    3333  pAllocationCallbacks->pUserData,
    3334  size,
    3335  alignment,
    3336  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3337  }
    3338  else
    3339  {
    3340  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3341  }
    3342 }
    3343 
    3344 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3345 {
    3346  if((pAllocationCallbacks != VMA_NULL) &&
    3347  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3348  {
    3349  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3350  }
    3351  else
    3352  {
    3353  VMA_SYSTEM_FREE(ptr);
    3354  }
    3355 }
    3356 
    3357 template<typename T>
    3358 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3359 {
    3360  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3361 }
    3362 
    3363 template<typename T>
    3364 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3365 {
    3366  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3367 }
    3368 
    3369 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3370 
    3371 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3372 
    3373 template<typename T>
    3374 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3375 {
    3376  ptr->~T();
    3377  VmaFree(pAllocationCallbacks, ptr);
    3378 }
    3379 
    3380 template<typename T>
    3381 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3382 {
    3383  if(ptr != VMA_NULL)
    3384  {
    3385  for(size_t i = count; i--; )
    3386  {
    3387  ptr[i].~T();
    3388  }
    3389  VmaFree(pAllocationCallbacks, ptr);
    3390  }
    3391 }
    3392 
// STL-compatible allocator that forwards every allocation to the library's
// VkAllocationCallbacks. Used to parameterize VmaVector/VmaList and (when
// enabled) the std:: containers.
template<typename T>
class VmaStlAllocator
{
public:
    // Callbacks are shared, never owned; const so rebound copies stay consistent.
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebind copy-constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // Assignment is deleted: m_pCallbacks is const and must not be rebound.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3420 
    3421 #if VMA_USE_STL_VECTOR
    3422 
    3423 #define VmaVector std::vector
    3424 
// Adapter so generic code can insert by index into a std::vector
// (used when VMA_USE_STL_VECTOR selects std::vector as VmaVector).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
    3430 
// Adapter so generic code can erase by index from a std::vector.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
    3436 
    3437 #else // #if VMA_USE_STL_VECTOR
    3438 
    3439 /* Class with interface compatible with subset of std::vector.
    3440 T must be POD because constructors and destructors are not called and memcpy is
    3441 used for these objects. */
    3442 template<typename T, typename AllocatorT>
    3443 class VmaVector
    3444 {
    3445 public:
    3446  typedef T value_type;
    3447 
    3448  VmaVector(const AllocatorT& allocator) :
    3449  m_Allocator(allocator),
    3450  m_pArray(VMA_NULL),
    3451  m_Count(0),
    3452  m_Capacity(0)
    3453  {
    3454  }
    3455 
    3456  VmaVector(size_t count, const AllocatorT& allocator) :
    3457  m_Allocator(allocator),
    3458  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3459  m_Count(count),
    3460  m_Capacity(count)
    3461  {
    3462  }
    3463 
    3464  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3465  m_Allocator(src.m_Allocator),
    3466  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3467  m_Count(src.m_Count),
    3468  m_Capacity(src.m_Count)
    3469  {
    3470  if(m_Count != 0)
    3471  {
    3472  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3473  }
    3474  }
    3475 
    3476  ~VmaVector()
    3477  {
    3478  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3479  }
    3480 
    3481  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3482  {
    3483  if(&rhs != this)
    3484  {
    3485  resize(rhs.m_Count);
    3486  if(m_Count != 0)
    3487  {
    3488  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3489  }
    3490  }
    3491  return *this;
    3492  }
    3493 
    3494  bool empty() const { return m_Count == 0; }
    3495  size_t size() const { return m_Count; }
    3496  T* data() { return m_pArray; }
    3497  const T* data() const { return m_pArray; }
    3498 
    3499  T& operator[](size_t index)
    3500  {
    3501  VMA_HEAVY_ASSERT(index < m_Count);
    3502  return m_pArray[index];
    3503  }
    3504  const T& operator[](size_t index) const
    3505  {
    3506  VMA_HEAVY_ASSERT(index < m_Count);
    3507  return m_pArray[index];
    3508  }
    3509 
    3510  T& front()
    3511  {
    3512  VMA_HEAVY_ASSERT(m_Count > 0);
    3513  return m_pArray[0];
    3514  }
    3515  const T& front() const
    3516  {
    3517  VMA_HEAVY_ASSERT(m_Count > 0);
    3518  return m_pArray[0];
    3519  }
    3520  T& back()
    3521  {
    3522  VMA_HEAVY_ASSERT(m_Count > 0);
    3523  return m_pArray[m_Count - 1];
    3524  }
    3525  const T& back() const
    3526  {
    3527  VMA_HEAVY_ASSERT(m_Count > 0);
    3528  return m_pArray[m_Count - 1];
    3529  }
    3530 
    3531  void reserve(size_t newCapacity, bool freeMemory = false)
    3532  {
    3533  newCapacity = VMA_MAX(newCapacity, m_Count);
    3534 
    3535  if((newCapacity < m_Capacity) && !freeMemory)
    3536  {
    3537  newCapacity = m_Capacity;
    3538  }
    3539 
    3540  if(newCapacity != m_Capacity)
    3541  {
    3542  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3543  if(m_Count != 0)
    3544  {
    3545  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3546  }
    3547  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3548  m_Capacity = newCapacity;
    3549  m_pArray = newArray;
    3550  }
    3551  }
    3552 
    3553  void resize(size_t newCount, bool freeMemory = false)
    3554  {
    3555  size_t newCapacity = m_Capacity;
    3556  if(newCount > m_Capacity)
    3557  {
    3558  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3559  }
    3560  else if(freeMemory)
    3561  {
    3562  newCapacity = newCount;
    3563  }
    3564 
    3565  if(newCapacity != m_Capacity)
    3566  {
    3567  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3568  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3569  if(elementsToCopy != 0)
    3570  {
    3571  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3572  }
    3573  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3574  m_Capacity = newCapacity;
    3575  m_pArray = newArray;
    3576  }
    3577 
    3578  m_Count = newCount;
    3579  }
    3580 
    3581  void clear(bool freeMemory = false)
    3582  {
    3583  resize(0, freeMemory);
    3584  }
    3585 
    3586  void insert(size_t index, const T& src)
    3587  {
    3588  VMA_HEAVY_ASSERT(index <= m_Count);
    3589  const size_t oldCount = size();
    3590  resize(oldCount + 1);
    3591  if(index < oldCount)
    3592  {
    3593  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3594  }
    3595  m_pArray[index] = src;
    3596  }
    3597 
    3598  void remove(size_t index)
    3599  {
    3600  VMA_HEAVY_ASSERT(index < m_Count);
    3601  const size_t oldCount = size();
    3602  if(index < oldCount - 1)
    3603  {
    3604  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3605  }
    3606  resize(oldCount - 1);
    3607  }
    3608 
    3609  void push_back(const T& src)
    3610  {
    3611  const size_t newIndex = size();
    3612  resize(newIndex + 1);
    3613  m_pArray[newIndex] = src;
    3614  }
    3615 
    3616  void pop_back()
    3617  {
    3618  VMA_HEAVY_ASSERT(m_Count > 0);
    3619  resize(size() - 1);
    3620  }
    3621 
    3622  void push_front(const T& src)
    3623  {
    3624  insert(0, src);
    3625  }
    3626 
    3627  void pop_front()
    3628  {
    3629  VMA_HEAVY_ASSERT(m_Count > 0);
    3630  remove(0);
    3631  }
    3632 
    3633  typedef T* iterator;
    3634 
    3635  iterator begin() { return m_pArray; }
    3636  iterator end() { return m_pArray + m_Count; }
    3637 
    3638 private:
    3639  AllocatorT m_Allocator;
    3640  T* m_pArray;
    3641  size_t m_Count;
    3642  size_t m_Capacity;
    3643 };
    3644 
// Index-based insert adapter for the custom VmaVector (same generic interface
// as the std::vector adapter in the VMA_USE_STL_VECTOR branch).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3650 
// Index-based remove adapter for the custom VmaVector.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3656 
    3657 #endif // #if VMA_USE_STL_VECTOR
    3658 
// Inserts `value` into a vector kept sorted by CmpLess, at the first position
// not less than value (binary search). Returns the insertion index.
// Precondition: `vector` is already sorted by CmpLess.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
    3670 
// Removes the first element equivalent to `value` (neither compares less than
// the other) from a CmpLess-sorted vector. Returns true if an element was
// found and removed, false otherwise.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    // Equivalence test: !(a < b) && !(b < a).
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
    3688 
// Binary-searches [beg, end) (sorted by CmpLess) for an element equivalent to
// `value`. Returns an iterator to it, or `end` if not found.
// NOTE(review): when it == end the first branch returns `end` directly, so the
// short-circuit before dereferencing *it is what keeps this safe.
template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}
    3702 
    3704 // class VmaPoolAllocator
    3705 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Outstanding pointers from Alloc() become invalid.
    void Clear();
    // Returns storage for one T. The object is NOT constructed.
    T* Alloc();
    // Returns `ptr` (obtained from Alloc) to its block's free list.
    void Free(T* ptr);

private:
    // Each slot is either a live T or, when free, a link in a singly-linked
    // free list of slot indices within the same block.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX == block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3741 
// Creates an empty pool; blocks of `itemsPerBlock` slots are allocated lazily
// on first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3750 
// Releases all blocks (any objects still "allocated" are simply dropped;
// their destructors are run by vma_delete_array on every slot).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3756 
// Frees every block array (in reverse order) and empties the block vector.
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}
    3764 
// Returns a free slot, searching newest blocks first (they are most likely to
// have free slots). Creates a new block when all are full. The returned T is
// raw storage — the caller is responsible for constructing it.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop the slot off the block's free list.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
    3786 
// Returns a slot to its owning block's free list. O(number of blocks): scans
// blocks by address range to find the owner. Asserts if ptr was not allocated
// from this pool. The T is NOT destroyed here — callers destroy it first.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy instead of reinterpret_cast to avoid strict-aliasing issues.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the slot onto the block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    3810 
// Allocates a new block with all slots free and threads them into a
// singly-linked free list (0 -> 1 -> ... -> UINT32_MAX terminator).
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Writing through the local copy's pItems is fine: it points at the same
    // heap array as the copy stored in m_ItemBlocks.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
    3825 
    3827 // class VmaRawList, VmaList
    3828 
    3829 #if VMA_USE_STL_LIST
    3830 
    3831 #define VmaList std::list
    3832 
    3833 #else // #if VMA_USE_STL_LIST
    3834 
// Node of the doubly linked VmaRawList below. POD: allocated from
// VmaPoolAllocator, so no constructors/destructors are run.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    3842 
// Doubly linked list.
// Low-level list over pool-allocated nodes; no iterator safety. Wrapped by
// VmaList below, which adds an STL-like iterator interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push* without a value leave Value uninitialized (POD nodes).
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Nodes come from this pool.
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    3887 
// Creates an empty list; node pool allocates 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3897 
// The pool allocator's destructor releases all node memory wholesale, so
// walking the list here to free nodes one-by-one would be wasted work.
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
    3904 
// Returns every node to the pool (walking back-to-front) and resets the list
// to the empty state.
template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            // Save pPrev before Free: the node's links may be clobbered by the
            // pool's free-list bookkeeping.
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}
    3922 
// Appends a new node (Value left uninitialized) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
    3944 
// Prepends a new node (Value left uninitialized) and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
    3966 
    3967 template<typename T>
    3968 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    3969 {
    3970  ItemType* const pNewItem = PushBack();
    3971  pNewItem->Value = value;
    3972  return pNewItem;
    3973 }
    3974 
    3975 template<typename T>
    3976 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    3977 {
    3978  ItemType* const pNewItem = PushFront();
    3979  pNewItem->Value = value;
    3980  return pNewItem;
    3981 }
    3982 
    3983 template<typename T>
    3984 void VmaRawList<T>::PopBack()
    3985 {
    3986  VMA_HEAVY_ASSERT(m_Count > 0);
    3987  ItemType* const pBackItem = m_pBack;
    3988  ItemType* const pPrevItem = pBackItem->pPrev;
    3989  if(pPrevItem != VMA_NULL)
    3990  {
    3991  pPrevItem->pNext = VMA_NULL;
    3992  }
    3993  m_pBack = pPrevItem;
    3994  m_ItemAllocator.Free(pBackItem);
    3995  --m_Count;
    3996 }
    3997 
    3998 template<typename T>
    3999 void VmaRawList<T>::PopFront()
    4000 {
    4001  VMA_HEAVY_ASSERT(m_Count > 0);
    4002  ItemType* const pFrontItem = m_pFront;
    4003  ItemType* const pNextItem = pFrontItem->pNext;
    4004  if(pNextItem != VMA_NULL)
    4005  {
    4006  pNextItem->pPrev = VMA_NULL;
    4007  }
    4008  m_pFront = pNextItem;
    4009  m_ItemAllocator.Free(pFrontItem);
    4010  --m_Count;
    4011 }
    4012 
// Unlinks `pItem` from the list and returns it to the pool. O(1).
// pItem must belong to this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4042 
// Inserts a new (uninitialized-Value) node before pItem; a null pItem means
// "before end()", i.e. PushBack. Returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front; the new node becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
    4068 
// Inserts a new (uninitialized-Value) node after pItem; a null pItem means
// PushFront. Returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back; the new node becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4094 
// Inserts a copy of `value` before pItem (null pItem == PushBack).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    4102 
// Inserts a copy of `value` after pItem (null pItem == PushFront).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4110 
// STL-like facade over VmaRawList: adds iterator/const_iterator with a subset
// of the std::list interface. A null m_pItem represents end(); operator--
// from end() therefore steps to Back(). AllocatorT must expose m_pCallbacks
// (VmaStlAllocator).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): step to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Only VmaList constructs non-default iterators.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Implicit conversion iterator -> const_iterator, like std::list.
        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): step to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before `it` (insert at end() == push_back), like std::list.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4295 
    4296 #endif // #if VMA_USE_STL_LIST
    4297 
    4299 // class VmaMap
    4300 
    4301 // Unused in this version.
    4302 #if 0
    4303 
    4304 #if VMA_USE_STL_UNORDERED_MAP
    4305 
    4306 #define VmaPair std::pair
    4307 
    4308 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4309  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4310 
    4311 #else // #if VMA_USE_STL_UNORDERED_MAP
    4312 
// Minimal std::pair substitute (POD-friendly) for VmaMap.
// Note: this whole section is inside `#if 0` — compiled out in this version.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4322 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a sorted vector of pairs with binary-search lookup.
(Inside `#if 0` — compiled out in this version.)
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Kept sorted by key (VmaPairFirstLess).
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4345 
    4346 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4347 
// Orders VmaPairs by `first`; the second overload enables heterogeneous
// lookup by key alone (used by VmaMap::find). (Inside `#if 0`.)
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4360 
// Inserts keeping m_Vector sorted by key (binary search for position).
// NOTE(review): duplicate keys are not rejected — presumably callers never
// insert the same key twice. (Inside `#if 0`.)
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4371 
// Binary-searches for `key`; returns iterator to the pair or end() if absent.
// (Inside `#if 0`.)
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4389 
// Erases the element at `it` (must be a valid iterator from find()).
// (Inside `#if 0`.)
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4395 
    4396 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4397 
    4398 #endif // #if 0
    4399 
    4401 
    4402 class VmaDeviceMemoryBlock;
    4403 
    4404 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4405 
/*
Internal representation of a single allocation (what a VmaAllocation handle
points to).

An allocation is in exactly one of the states of ALLOCATION_TYPE:
- ALLOCATION_TYPE_BLOCK: a suballocation inside a larger VmaDeviceMemoryBlock.
- ALLOCATION_TYPE_DEDICATED: owns its own private VkDeviceMemory.
The union at the bottom (m_BlockAllocation / m_DedicatedAllocation) holds the
per-type data; only the member matching m_Type is valid.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit set in m_MapCount when the allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // m_pUserData points to a null-terminated string owned by this object.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Object starts in ALLOCATION_TYPE_NONE state; call one of the Init*
    // methods afterwards to give it actual memory.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns this object into a suballocation of the given block.
    // Must currently be in ALLOCATION_TYPE_NONE state.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Turns this object into a block allocation that is already lost - it has
    // no memory behind it. m_LastUseFrameIndex must already be VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves this block allocation to a different block/offset
    // (defined out of line).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    // Must currently be in ALLOCATION_TYPE_NONE state.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only while in ALLOCATION_TYPE_BLOCK state.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; on failure, 'expected' is
    // updated to the current value (std::atomic compare_exchange_weak semantics).
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    // Valid only while in ALLOCATION_TYPE_DEDICATED state.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap entry points, split by allocation type (defined out of line).
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be set only once, after creation.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type (see ALLOCATION_TYPE).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4622 
    4623 /*
    4624 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4625 allocated memory block or free.
    4626 */
struct VmaSuballocation
{
    VkDeviceSize offset;        // Offset of this region from the start of the block.
    VkDeviceSize size;          // Size of this region in bytes.
    VmaAllocation hAllocation;  // Allocation occupying this region; null for a free/lost item.
    VmaSuballocationType type;  // Kind of resource bound here (see VmaSuballocationType).
};
    4634 
    4635 // Comparator for offsets.
    4636 struct VmaSuballocationOffsetLess
    4637 {
    4638  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4639  {
    4640  return lhs.offset < rhs.offset;
    4641  }
    4642 };
    4643 struct VmaSuballocationOffsetGreater
    4644 {
    4645  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4646  {
    4647  return lhs.offset > rhs.offset;
    4648  }
    4649 };
    4650 
// List of suballocations describing all regions (used and free) of a single
// device memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4655 
    4656 /*
    4657 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4658 
    4659 If canMakeOtherLost was false:
    4660 - item points to a FREE suballocation.
    4661 - itemsToMakeLostCount is 0.
    4662 
    4663 If canMakeOtherLost was true:
    4664 - item points to first of sequence of suballocations, which are either FREE,
    4665  or point to VmaAllocations that can become lost.
    4666 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4667  the requested allocation to succeed.
    4668 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;      // Offset at which the new allocation would be placed.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData;         // Algorithm-specific data; meaning depends on the metadata implementation.

    // Heuristic cost of fulfilling this request: bytes of existing allocations
    // that must be sacrificed, plus a fixed per-allocation penalty
    // (VMA_LOST_ALLOCATION_COST). Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4683 
    4684 /*
    4685 Data structure used for bookkeeping of allocations and unused ranges of memory
    4686 in a single VkDeviceMemory block.
    4687 */
// Abstract base class - common interface for the concrete allocation
// algorithms (Generic, Linear, Buddy) defined below.
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations indicated in pAllocationRequest.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for subclasses to emit the common parts of the JSON detailed map.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4769 
// Used inside Validate() implementations: asserts (in debug) on a failed
// condition and makes the enclosing function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4774 
// Default metadata implementation: keeps all suballocations in a list, plus a
// size-sorted index of the free ones for fast best-fit search.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    // Number of free items in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all free suballocations.
    VkDeviceSize m_SumFreeSize;
    // All suballocations (used and free) of this block.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4865 
    4866 /*
    4867 Allocations and their references in internal data structure look like this:
    4868 
    4869 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4870 
    4871  0 +-------+
    4872  | |
    4873  | |
    4874  | |
    4875  +-------+
    4876  | Alloc | 1st[m_1stNullItemsBeginCount]
    4877  +-------+
    4878  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4879  +-------+
    4880  | ... |
    4881  +-------+
    4882  | Alloc | 1st[1st.size() - 1]
    4883  +-------+
    4884  | |
    4885  | |
    4886  | |
    4887 GetSize() +-------+
    4888 
    4889 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4890 
    4891  0 +-------+
    4892  | Alloc | 2nd[0]
    4893  +-------+
    4894  | Alloc | 2nd[1]
    4895  +-------+
    4896  | ... |
    4897  +-------+
    4898  | Alloc | 2nd[2nd.size() - 1]
    4899  +-------+
    4900  | |
    4901  | |
    4902  | |
    4903  +-------+
    4904  | Alloc | 1st[m_1stNullItemsBeginCount]
    4905  +-------+
    4906  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4907  +-------+
    4908  | ... |
    4909  +-------+
    4910  | Alloc | 1st[1st.size() - 1]
    4911  +-------+
    4912  | |
    4913 GetSize() +-------+
    4914 
    4915 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4916 
    4917  0 +-------+
    4918  | |
    4919  | |
    4920  | |
    4921  +-------+
    4922  | Alloc | 1st[m_1stNullItemsBeginCount]
    4923  +-------+
    4924  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4925  +-------+
    4926  | ... |
    4927  +-------+
    4928  | Alloc | 1st[1st.size() - 1]
    4929  +-------+
    4930  | |
    4931  | |
    4932  | |
    4933  +-------+
    4934  | Alloc | 2nd[2nd.size() - 1]
    4935  +-------+
    4936  | ... |
    4937  +-------+
    4938  | Alloc | 2nd[1]
    4939  +-------+
    4940  | Alloc | 2nd[0]
    4941 GetSize() +-------+
    4942 
    4943 */
// Metadata implementation for the linear allocation algorithm
// (ring buffer / double stack) - see the diagram in the comment above.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5042 
    5043 /*
    5044 - GetSize() is the original size of allocated memory block.
    5045 - m_UsableSize is this size aligned down to a power of two.
    5046  All allocations and calculations happen relative to m_UsableSize.
    5047 - GetUnusableSize() is the difference between them.
 It is reported as separate, unused range, not available for allocations.
    5049 
    5050 Node at level 0 has size = m_UsableSize.
    5051 Each next level contains nodes with size 2 times smaller than current level.
    5052 m_LevelCount is the maximum number of levels to use in the current object.
    5053 */
// Metadata implementation for the buddy allocation algorithm: a binary tree
// of power-of-two nodes over m_UsableSize (see the comment above).
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled while recursively validating the tree, compared
    // against the cached counters at the end of Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the buddy tree. The union member in use is selected by 'type'.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked list of free nodes, one list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5190 
    5191 /*
    5192 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5193 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5194 
    5195 Thread-safety: This class must be externally synchronized.
    5196 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block; concrete type depends
    // on the algorithm chosen in Init().
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory, with reference counting. ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5259 
    5260 struct VmaPointerLess
    5261 {
    5262  bool operator()(const void* lhs, const void* rhs) const
    5263  {
    5264  return lhs < rhs;
    5265  }
    5266 };
    5267 
    5268 class VmaDefragmentator;
    5269 
    5270 /*
    5271 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5272 Vulkan memory type.
    5273 
    5274 Synchronized internally with a mutex.
    5275 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Returns the defragmentator for this vector, creating it on first use.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one block that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5388 
// Implementation of the opaque VmaPool handle: a custom memory pool,
// i.e. an independently configured vector of memory blocks.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned only once (m_Id must still be 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5411 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation, within the maxBytesToMove / maxAllocationsToMove limits.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals reported back through GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        // Optional out-flag set when this allocation has been moved.
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations if not all of its allocations
        // were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // (sic: typo "Descecnding" kept — renaming would break other callers.)
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5541 
    5542 #if VMA_RECORDING_ENABLED
    5543 
// Writes a log of allocator API calls to a file (m_File), one record per call,
// so a session can be inspected or replayed later.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Per-call metadata written with every record.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats pUserData either as a string or as a pointer, depending on flags.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    // Serializes writes to m_File when m_UseMutex is set.
    VMA_MUTEX m_FileMutex;
    // High-resolution timer calibration for CallParams::time.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5639 
    5640 #endif // #if VMA_RECORDING_ENABLED
    5641 
    5642 // Main allocator object.
// Implementation of the opaque VmaAllocator handle: owns the default block
// vectors per memory type, dedicated allocations, custom pools, and the
// imported Vulkan function pointers.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also respect nonCoherentAtomSize for flush/invalidate.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5839 
    5841 // Memory allocation #2 after VmaAllocator_T definition
    5842 
    5843 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5844 {
    5845  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5846 }
    5847 
    5848 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5849 {
    5850  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5851 }
    5852 
    5853 template<typename T>
    5854 static T* VmaAllocate(VmaAllocator hAllocator)
    5855 {
    5856  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5857 }
    5858 
    5859 template<typename T>
    5860 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5861 {
    5862  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5863 }
    5864 
    5865 template<typename T>
    5866 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5867 {
    5868  if(ptr != VMA_NULL)
    5869  {
    5870  ptr->~T();
    5871  VmaFree(hAllocator, ptr);
    5872  }
    5873 }
    5874 
    5875 template<typename T>
    5876 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5877 {
    5878  if(ptr != VMA_NULL)
    5879  {
    5880  for(size_t i = count; i--; )
    5881  ptr[i].~T();
    5882  VmaFree(hAllocator, ptr);
    5883  }
    5884 }
    5885 
    5887 // VmaStringBuilder
    5888 
    5889 #if VMA_STATS_STRING_ENABLED
    5890 
// Simple append-only character buffer used to build stats strings.
// The buffer is NOT null-terminated; use GetLength() with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5908 
    5909 void VmaStringBuilder::Add(const char* pStr)
    5910 {
    5911  const size_t strLen = strlen(pStr);
    5912  if(strLen > 0)
    5913  {
    5914  const size_t oldCount = m_Data.size();
    5915  m_Data.resize(oldCount + strLen);
    5916  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5917  }
    5918 }
    5919 
    5920 void VmaStringBuilder::AddNumber(uint32_t num)
    5921 {
    5922  char buf[11];
    5923  VmaUint32ToStr(buf, sizeof(buf), num);
    5924  Add(buf);
    5925 }
    5926 
    5927 void VmaStringBuilder::AddNumber(uint64_t num)
    5928 {
    5929  char buf[21];
    5930  VmaUint64ToStr(buf, sizeof(buf), num);
    5931  Add(buf);
    5932 }
    5933 
    5934 void VmaStringBuilder::AddPointer(const void* ptr)
    5935 {
    5936  char buf[21];
    5937  VmaPtrToStr(buf, sizeof(buf), ptr);
    5938  Add(buf);
    5939 }
    5940 
    5941 #endif // #if VMA_STATS_STRING_ENABLED
    5942 
    5944 // VmaJsonWriter
    5945 
    5946 #if VMA_STATS_STRING_ENABLED
    5947 
// Streaming JSON writer on top of VmaStringBuilder. Maintains a stack of open
// collections so nesting, commas, ':' separators, and indentation are emitted
// automatically. Inside an object, values alternate key (string) / value.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value; equivalent to BeginString(pStr) + EndString().
    void WriteString(const char* pStr);
    // BeginString/ContinueString/EndString allow building one string value in pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // Indentation unit, appended once per nesting level.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One frame per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values written so far; in objects, even = key, odd = value.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    5996 
// Indentation unit appended once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
    5998 
// Binds the writer to an output string builder; the collection stack uses the
// provided allocation callbacks.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6005 
VmaJsonWriter::~VmaJsonWriter()
{
    // Every string and every object/array must have been closed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6011 
    6012 void VmaJsonWriter::BeginObject(bool singleLine)
    6013 {
    6014  VMA_ASSERT(!m_InsideString);
    6015 
    6016  BeginValue(false);
    6017  m_SB.Add('{');
    6018 
    6019  StackItem item;
    6020  item.type = COLLECTION_TYPE_OBJECT;
    6021  item.valueCount = 0;
    6022  item.singleLineMode = singleLine;
    6023  m_Stack.push_back(item);
    6024 }
    6025 
    6026 void VmaJsonWriter::EndObject()
    6027 {
    6028  VMA_ASSERT(!m_InsideString);
    6029 
    6030  WriteIndent(true);
    6031  m_SB.Add('}');
    6032 
    6033  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6034  m_Stack.pop_back();
    6035 }
    6036 
    6037 void VmaJsonWriter::BeginArray(bool singleLine)
    6038 {
    6039  VMA_ASSERT(!m_InsideString);
    6040 
    6041  BeginValue(false);
    6042  m_SB.Add('[');
    6043 
    6044  StackItem item;
    6045  item.type = COLLECTION_TYPE_ARRAY;
    6046  item.valueCount = 0;
    6047  item.singleLineMode = singleLine;
    6048  m_Stack.push_back(item);
    6049 }
    6050 
    6051 void VmaJsonWriter::EndArray()
    6052 {
    6053  VMA_ASSERT(!m_InsideString);
    6054 
    6055  WriteIndent(true);
    6056  m_SB.Add(']');
    6057 
    6058  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6059  m_Stack.pop_back();
    6060 }
    6061 
    6062 void VmaJsonWriter::WriteString(const char* pStr)
    6063 {
    6064  BeginString(pStr);
    6065  EndString();
    6066 }
    6067 
    6068 void VmaJsonWriter::BeginString(const char* pStr)
    6069 {
    6070  VMA_ASSERT(!m_InsideString);
    6071 
    6072  BeginValue(true);
    6073  m_SB.Add('"');
    6074  m_InsideString = true;
    6075  if(pStr != VMA_NULL && pStr[0] != '\0')
    6076  {
    6077  ContinueString(pStr);
    6078  }
    6079 }
    6080 
    6081 void VmaJsonWriter::ContinueString(const char* pStr)
    6082 {
    6083  VMA_ASSERT(m_InsideString);
    6084 
    6085  const size_t strLen = strlen(pStr);
    6086  for(size_t i = 0; i < strLen; ++i)
    6087  {
    6088  char ch = pStr[i];
    6089  if(ch == '\\')
    6090  {
    6091  m_SB.Add("\\\\");
    6092  }
    6093  else if(ch == '"')
    6094  {
    6095  m_SB.Add("\\\"");
    6096  }
    6097  else if(ch >= 32)
    6098  {
    6099  m_SB.Add(ch);
    6100  }
    6101  else switch(ch)
    6102  {
    6103  case '\b':
    6104  m_SB.Add("\\b");
    6105  break;
    6106  case '\f':
    6107  m_SB.Add("\\f");
    6108  break;
    6109  case '\n':
    6110  m_SB.Add("\\n");
    6111  break;
    6112  case '\r':
    6113  m_SB.Add("\\r");
    6114  break;
    6115  case '\t':
    6116  m_SB.Add("\\t");
    6117  break;
    6118  default:
    6119  VMA_ASSERT(0 && "Character not currently supported.");
    6120  break;
    6121  }
    6122  }
    6123 }
    6124 
    6125 void VmaJsonWriter::ContinueString(uint32_t n)
    6126 {
    6127  VMA_ASSERT(m_InsideString);
    6128  m_SB.AddNumber(n);
    6129 }
    6130 
    6131 void VmaJsonWriter::ContinueString(uint64_t n)
    6132 {
    6133  VMA_ASSERT(m_InsideString);
    6134  m_SB.AddNumber(n);
    6135 }
    6136 
    6137 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    6138 {
    6139  VMA_ASSERT(m_InsideString);
    6140  m_SB.AddPointer(ptr);
    6141 }
    6142 
    6143 void VmaJsonWriter::EndString(const char* pStr)
    6144 {
    6145  VMA_ASSERT(m_InsideString);
    6146  if(pStr != VMA_NULL && pStr[0] != '\0')
    6147  {
    6148  ContinueString(pStr);
    6149  }
    6150  m_SB.Add('"');
    6151  m_InsideString = false;
    6152 }
    6153 
    6154 void VmaJsonWriter::WriteNumber(uint32_t n)
    6155 {
    6156  VMA_ASSERT(!m_InsideString);
    6157  BeginValue(false);
    6158  m_SB.AddNumber(n);
    6159 }
    6160 
    6161 void VmaJsonWriter::WriteNumber(uint64_t n)
    6162 {
    6163  VMA_ASSERT(!m_InsideString);
    6164  BeginValue(false);
    6165  m_SB.AddNumber(n);
    6166 }
    6167 
    6168 void VmaJsonWriter::WriteBool(bool b)
    6169 {
    6170  VMA_ASSERT(!m_InsideString);
    6171  BeginValue(false);
    6172  m_SB.Add(b ? "true" : "false");
    6173 }
    6174 
    6175 void VmaJsonWriter::WriteNull()
    6176 {
    6177  VMA_ASSERT(!m_InsideString);
    6178  BeginValue(false);
    6179  m_SB.Add("null");
    6180 }
    6181 
/*
Called before every value written. Emits the separator appropriate for the
current position in the enclosing collection: ": " between an object key and
its value, ", " plus indent between consecutive elements, or just the indent
before the first element. At top level (empty stack) nothing is emitted.
*/
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Inside an object, values at even positions (0, 2, 4...) are keys,
        // and JSON keys must be strings.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        // Odd position inside an object = the value following its key.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
    6210 
    6211 void VmaJsonWriter::WriteIndent(bool oneLess)
    6212 {
    6213  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6214  {
    6215  m_SB.AddNewLine();
    6216 
    6217  size_t count = m_Stack.size();
    6218  if(count > 0 && oneLess)
    6219  {
    6220  --count;
    6221  }
    6222  for(size_t i = 0; i < count; ++i)
    6223  {
    6224  m_SB.Add(INDENT);
    6225  }
    6226  }
    6227 }
    6228 
    6229 #endif // #if VMA_STATS_STRING_ENABLED
    6230 
    6232 
/*
Sets the user data of this allocation. When the allocation stores user data
as a string (IsUserDataString()), pUserData is treated as a null-terminated
string: the previously owned copy is freed and a fresh copy is made with the
allocator's callbacks. Otherwise the raw pointer is stored as-is, unowned.
*/
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Passing the pointer already stored would be read after being freed below.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            // +1 copies the terminating null as well.
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
    6255 
/*
Rebinds this block allocation to a different memory block and offset
(used e.g. when the allocation is moved during defragmentation).
Transfers this allocation's outstanding map reference count from the old
block to the new one so the blocks' mapping refcounts stay balanced.
*/
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        // Low bits of m_MapCount are the refcount; a persistent mapping
        // contributes one extra reference.
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6277 
    6278 VkDeviceSize VmaAllocation_T::GetOffset() const
    6279 {
    6280  switch(m_Type)
    6281  {
    6282  case ALLOCATION_TYPE_BLOCK:
    6283  return m_BlockAllocation.m_Offset;
    6284  case ALLOCATION_TYPE_DEDICATED:
    6285  return 0;
    6286  default:
    6287  VMA_ASSERT(0);
    6288  return 0;
    6289  }
    6290 }
    6291 
    6292 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6293 {
    6294  switch(m_Type)
    6295  {
    6296  case ALLOCATION_TYPE_BLOCK:
    6297  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6298  case ALLOCATION_TYPE_DEDICATED:
    6299  return m_DedicatedAllocation.m_hMemory;
    6300  default:
    6301  VMA_ASSERT(0);
    6302  return VK_NULL_HANDLE;
    6303  }
    6304 }
    6305 
    6306 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6307 {
    6308  switch(m_Type)
    6309  {
    6310  case ALLOCATION_TYPE_BLOCK:
    6311  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6312  case ALLOCATION_TYPE_DEDICATED:
    6313  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6314  default:
    6315  VMA_ASSERT(0);
    6316  return UINT32_MAX;
    6317  }
    6318 }
    6319 
    6320 void* VmaAllocation_T::GetMappedData() const
    6321 {
    6322  switch(m_Type)
    6323  {
    6324  case ALLOCATION_TYPE_BLOCK:
    6325  if(m_MapCount != 0)
    6326  {
    6327  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6328  VMA_ASSERT(pBlockData != VMA_NULL);
    6329  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6330  }
    6331  else
    6332  {
    6333  return VMA_NULL;
    6334  }
    6335  break;
    6336  case ALLOCATION_TYPE_DEDICATED:
    6337  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6338  return m_DedicatedAllocation.m_pMappedData;
    6339  default:
    6340  VMA_ASSERT(0);
    6341  return VMA_NULL;
    6342  }
    6343 }
    6344 
    6345 bool VmaAllocation_T::CanBecomeLost() const
    6346 {
    6347  switch(m_Type)
    6348  {
    6349  case ALLOCATION_TYPE_BLOCK:
    6350  return m_BlockAllocation.m_CanBecomeLost;
    6351  case ALLOCATION_TYPE_DEDICATED:
    6352  return false;
    6353  default:
    6354  VMA_ASSERT(0);
    6355  return false;
    6356  }
    6357 }
    6358 
    6359 VmaPool VmaAllocation_T::GetPool() const
    6360 {
    6361  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6362  return m_BlockAllocation.m_hPool;
    6363 }
    6364 
/*
Atomically tries to mark this allocation as lost. Returns true on success.
Fails (returns false) if the allocation was used recently enough, i.e. its
last-use frame plus frameInUseCount is not yet older than currentFrameIndex.
Uses a compare-exchange retry loop so concurrent users of the allocation are
handled correctly.
*/
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still considered in use by recent frames.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: another thread updated the frame index - retry with
            // the refreshed value (localLastUseFrameIndex was updated by the
            // compare-exchange).
        }
    }
}
    6396 
    6397 #if VMA_STATS_STRING_ENABLED
    6398 
// Correspond to values of enum VmaSuballocationType.
// The order and count of entries must match that enum exactly, since the
// enum value is used directly as an index into this array.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6408 
/*
Writes this allocation's parameters as key/value pairs into an already-open
JSON object. The caller owns BeginObject()/EndObject(). Key order is part of
the stats-string format consumed by external tools; do not reorder.
*/
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned string copy - emit as escaped JSON string.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - emit its formatted address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6444 
    6445 #endif
    6446 
    6447 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6448 {
    6449  VMA_ASSERT(IsUserDataString());
    6450  if(m_pUserData != VMA_NULL)
    6451  {
    6452  char* const oldStr = (char*)m_pUserData;
    6453  const size_t oldStrLen = strlen(oldStr);
    6454  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6455  m_pUserData = VMA_NULL;
    6456  }
    6457 }
    6458 
    6459 void VmaAllocation_T::BlockAllocMap()
    6460 {
    6461  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6462 
    6463  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6464  {
    6465  ++m_MapCount;
    6466  }
    6467  else
    6468  {
    6469  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6470  }
    6471 }
    6472 
    6473 void VmaAllocation_T::BlockAllocUnmap()
    6474 {
    6475  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6476 
    6477  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6478  {
    6479  --m_MapCount;
    6480  }
    6481  else
    6482  {
    6483  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6484  }
    6485 }
    6486 
/*
Maps a dedicated allocation and returns the CPU pointer in *ppData.
If already mapped, only the reference count is incremented and the cached
pointer is returned; otherwise vkMapMemory is called for the whole range.
Returns VK_ERROR_MEMORY_MAP_FAILED when the refcount would exceed 0x7F,
or whatever vkMapMemory returns on first map.
*/
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: bump refcount and hand out the cached pointer.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First map: map the entire dedicated VkDeviceMemory.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    6523 
/*
Unmaps a dedicated allocation: decrements the map reference count and calls
vkUnmapMemory only when the count drops to zero. Asserts if called on an
allocation that is not currently mapped.
*/
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference gone: actually unmap and drop the cached pointer.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6544 
    6545 #if VMA_STATS_STRING_ENABLED
    6546 
/*
Serializes one VmaStatInfo as a JSON object: block/allocation/unused-range
counts, used/unused byte totals, and (only when there is more than one
sample, so min/avg/max are meaningful) nested size-statistics objects.
Key order is part of the stats-string format; do not reorder.
*/
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true); // single-line object
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true); // single-line object
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6594 
    6595 #endif // #if VMA_STATS_STRING_ENABLED
    6596 
// Comparator for keeping m_FreeSuballocationsBySize sorted by suballocation
// size ascending. The second overload allows binary-searching that vector
// with a plain VkDeviceSize key (e.g. via VmaBinaryFindFirstNotLess).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6612 
    6613 
    6615 // class VmaBlockMetadata
    6616 
// Base-class constructor: size is set later via Init(); only the allocation
// callbacks are captured here for use by derived metadata implementations.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6622 
    6623 #if VMA_STATS_STRING_ENABLED
    6624 
/*
Opens the JSON object for one block's detailed map: writes the summary
fields, then opens the "Suballocations" array. Must be paired with
PrintDetailedMap_End(); the caller emits array elements in between.
*/
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6647 
// Writes one used suballocation as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true); // single-line object

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // The allocation writes its own Type/Size/UserData/... fields.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6661 
// Writes one free range as a single-line JSON object with type "FREE"
// inside the "Suballocations" array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true); // single-line object

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6679 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6685 
    6686 #endif // #if VMA_STATS_STRING_ENABLED
    6687 
    6689 // class VmaBlockMetadata_Generic
    6690 
// Constructs empty metadata; real initialization happens in Init(size).
// Both containers use the allocator's callbacks via VmaStlAllocator.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6699 
// Trivial destructor: members clean up via their own destructors.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6703 
/*
Initializes metadata for a freshly created block of the given size:
the whole block becomes one free suballocation, which is also registered
in the size-sorted free list.
*/
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // A whole block is always large enough to be registered by size.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    // Grab an iterator to the element just pushed (end() - 1) and register it.
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6723 
/*
Checks all internal invariants of this block's metadata. Returns true when
consistent; each VMA_VALIDATE macro returns false early on the first
violated invariant. Used by VMA_HEAVY_ASSERT in debug configurations.
*/
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free suballocations carry no allocation handle; used ones must.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the metadata entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // The size-sorted free list must contain only free entries, sorted ascending.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6805 
    6806 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6807 {
    6808  if(!m_FreeSuballocationsBySize.empty())
    6809  {
    6810  return m_FreeSuballocationsBySize.back()->size;
    6811  }
    6812  else
    6813  {
    6814  return 0;
    6815  }
    6816 }
    6817 
    6818 bool VmaBlockMetadata_Generic::IsEmpty() const
    6819 {
    6820  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6821 }
    6822 
    6823 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6824 {
    6825  outInfo.blockCount = 1;
    6826 
    6827  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6828  outInfo.allocationCount = rangeCount - m_FreeCount;
    6829  outInfo.unusedRangeCount = m_FreeCount;
    6830 
    6831  outInfo.unusedBytes = m_SumFreeSize;
    6832  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6833 
    6834  outInfo.allocationSizeMin = UINT64_MAX;
    6835  outInfo.allocationSizeMax = 0;
    6836  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6837  outInfo.unusedRangeSizeMax = 0;
    6838 
    6839  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6840  suballocItem != m_Suballocations.cend();
    6841  ++suballocItem)
    6842  {
    6843  const VmaSuballocation& suballoc = *suballocItem;
    6844  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6845  {
    6846  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6847  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6848  }
    6849  else
    6850  {
    6851  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6852  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6853  }
    6854  }
    6855 }
    6856 
// Accumulates this block's contribution into pool-wide statistics:
// totals are summed, unusedRangeSizeMax takes the running maximum.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
    6867 
    6868 #if VMA_STATS_STRING_ENABLED
    6869 
    6870 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6871 {
    6872  PrintDetailedMap_Begin(json,
    6873  m_SumFreeSize, // unusedBytes
    6874  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6875  m_FreeCount); // unusedRangeCount
    6876 
    6877  size_t i = 0;
    6878  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6879  suballocItem != m_Suballocations.cend();
    6880  ++suballocItem, ++i)
    6881  {
    6882  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6883  {
    6884  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6885  }
    6886  else
    6887  {
    6888  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6889  }
    6890  }
    6891 
    6892  PrintDetailedMap_End(json);
    6893 }
    6894 
    6895 #endif // #if VMA_STATS_STRING_ENABLED
    6896 
    6897 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6898  uint32_t currentFrameIndex,
    6899  uint32_t frameInUseCount,
    6900  VkDeviceSize bufferImageGranularity,
    6901  VkDeviceSize allocSize,
    6902  VkDeviceSize allocAlignment,
    6903  bool upperAddress,
    6904  VmaSuballocationType allocType,
    6905  bool canMakeOtherLost,
    6906  uint32_t strategy,
    6907  VmaAllocationRequest* pAllocationRequest)
    6908 {
    6909  VMA_ASSERT(allocSize > 0);
    6910  VMA_ASSERT(!upperAddress);
    6911  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6912  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6913  VMA_HEAVY_ASSERT(Validate());
    6914 
    6915  // There is not enough total free space in this block to fullfill the request: Early return.
    6916  if(canMakeOtherLost == false &&
    6917  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6918  {
    6919  return false;
    6920  }
    6921 
    6922  // New algorithm, efficiently searching freeSuballocationsBySize.
    6923  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6924  if(freeSuballocCount > 0)
    6925  {
    6927  {
    6928  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6929  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6930  m_FreeSuballocationsBySize.data(),
    6931  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6932  allocSize + 2 * VMA_DEBUG_MARGIN,
    6933  VmaSuballocationItemSizeLess());
    6934  size_t index = it - m_FreeSuballocationsBySize.data();
    6935  for(; index < freeSuballocCount; ++index)
    6936  {
    6937  if(CheckAllocation(
    6938  currentFrameIndex,
    6939  frameInUseCount,
    6940  bufferImageGranularity,
    6941  allocSize,
    6942  allocAlignment,
    6943  allocType,
    6944  m_FreeSuballocationsBySize[index],
    6945  false, // canMakeOtherLost
    6946  &pAllocationRequest->offset,
    6947  &pAllocationRequest->itemsToMakeLostCount,
    6948  &pAllocationRequest->sumFreeSize,
    6949  &pAllocationRequest->sumItemSize))
    6950  {
    6951  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6952  return true;
    6953  }
    6954  }
    6955  }
    6956  else // WORST_FIT, FIRST_FIT
    6957  {
    6958  // Search staring from biggest suballocations.
    6959  for(size_t index = freeSuballocCount; index--; )
    6960  {
    6961  if(CheckAllocation(
    6962  currentFrameIndex,
    6963  frameInUseCount,
    6964  bufferImageGranularity,
    6965  allocSize,
    6966  allocAlignment,
    6967  allocType,
    6968  m_FreeSuballocationsBySize[index],
    6969  false, // canMakeOtherLost
    6970  &pAllocationRequest->offset,
    6971  &pAllocationRequest->itemsToMakeLostCount,
    6972  &pAllocationRequest->sumFreeSize,
    6973  &pAllocationRequest->sumItemSize))
    6974  {
    6975  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6976  return true;
    6977  }
    6978  }
    6979  }
    6980  }
    6981 
    6982  if(canMakeOtherLost)
    6983  {
    6984  // Brute-force algorithm. TODO: Come up with something better.
    6985 
    6986  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6987  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6988 
    6989  VmaAllocationRequest tmpAllocRequest = {};
    6990  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    6991  suballocIt != m_Suballocations.end();
    6992  ++suballocIt)
    6993  {
    6994  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    6995  suballocIt->hAllocation->CanBecomeLost())
    6996  {
    6997  if(CheckAllocation(
    6998  currentFrameIndex,
    6999  frameInUseCount,
    7000  bufferImageGranularity,
    7001  allocSize,
    7002  allocAlignment,
    7003  allocType,
    7004  suballocIt,
    7005  canMakeOtherLost,
    7006  &tmpAllocRequest.offset,
    7007  &tmpAllocRequest.itemsToMakeLostCount,
    7008  &tmpAllocRequest.sumFreeSize,
    7009  &tmpAllocRequest.sumItemSize))
    7010  {
    7011  tmpAllocRequest.item = suballocIt;
    7012 
    7013  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7015  {
    7016  *pAllocationRequest = tmpAllocRequest;
    7017  }
    7018  }
    7019  }
    7020  }
    7021 
    7022  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7023  {
    7024  return true;
    7025  }
    7026  }
    7027 
    7028  return false;
    7029 }
    7030 
/*
Makes lost the allocations that CreateAllocationRequest() marked as needing
eviction (itemsToMakeLostCount), freeing their suballocations. Returns false
if any of them can no longer be made lost (e.g. it was used again since the
request was built); on success pAllocationRequest->item points at a free
suballocation ready for Alloc().
*/
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over an already-free item to reach the next used one.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge with neighbors; it returns the
            // resulting free item, which becomes the new request anchor.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7062 
    7063 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7064 {
    7065  uint32_t lostAllocationCount = 0;
    7066  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7067  it != m_Suballocations.end();
    7068  ++it)
    7069  {
    7070  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7071  it->hAllocation->CanBecomeLost() &&
    7072  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7073  {
    7074  it = FreeSuballocation(it);
    7075  ++lostAllocationCount;
    7076  }
    7077  }
    7078  return lostAllocationCount;
    7079 }
    7080 
    7081 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7082 {
    7083  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7084  it != m_Suballocations.end();
    7085  ++it)
    7086  {
    7087  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7088  {
    7089  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7090  {
    7091  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7092  return VK_ERROR_VALIDATION_FAILED_EXT;
    7093  }
    7094  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7095  {
    7096  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7097  return VK_ERROR_VALIDATION_FAILED_EXT;
    7098  }
    7099  }
    7100  }
    7101 
    7102  return VK_SUCCESS;
    7103 }
    7104 
/*
Commits an allocation previously found by CreateAllocationRequest():
converts the chosen free suballocation into a used one at request.offset,
and splits off any leftover space before/after it into new free
suballocations. Updates m_FreeCount and m_SumFreeSize accordingly.
Note the end padding is inserted before the begin padding so that
request.item stays a valid iterator throughout.
*/
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range consumed, up to two new ones created.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7170 
    7171 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7172 {
    7173  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7174  suballocItem != m_Suballocations.end();
    7175  ++suballocItem)
    7176  {
    7177  VmaSuballocation& suballoc = *suballocItem;
    7178  if(suballoc.hAllocation == allocation)
    7179  {
    7180  FreeSuballocation(suballocItem);
    7181  VMA_HEAVY_ASSERT(Validate());
    7182  return;
    7183  }
    7184  }
    7185  VMA_ASSERT(0 && "Not found!");
    7186 }
    7187 
    7188 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7189 {
    7190  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7191  suballocItem != m_Suballocations.end();
    7192  ++suballocItem)
    7193  {
    7194  VmaSuballocation& suballoc = *suballocItem;
    7195  if(suballoc.offset == offset)
    7196  {
    7197  FreeSuballocation(suballocItem);
    7198  return;
    7199  }
    7200  }
    7201  VMA_ASSERT(0 && "Not found!");
    7202 }
    7203 
    7204 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7205 {
    7206  VkDeviceSize lastSize = 0;
    7207  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7208  {
    7209  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7210 
    7211  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7212  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7213  VMA_VALIDATE(it->size >= lastSize);
    7214  lastSize = it->size;
    7215  }
    7216  return true;
    7217 }
    7218 
/*
Checks whether an allocation of allocSize bytes aligned to allocAlignment can
be placed starting at the suballocation pointed to by suballocItem.

- canMakeOtherLost == false: suballocItem must be a single free suballocation
  large enough to hold the request (plus debug margins).
- canMakeOtherLost == true: the candidate range may span multiple consecutive
  suballocations; used ones are counted as "to be made lost" if they
  CanBecomeLost() and are older than currentFrameIndex - frameInUseCount.
- bufferImageGranularity > 1 triggers checks against neighboring
  suballocations via VmaBlocksOnSamePage / VmaIsBufferImageGranularityConflict,
  possibly raising the alignment or rejecting the placement.

Outputs: *pOffset receives the final aligned offset (valid on success);
*itemsToMakeLostCount, *pSumFreeSize and *pSumItemSize are always zeroed first
and then accumulated over the examined range.
Returns false if the request cannot be satisfied at this starting point.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Classify the starting suballocation: free space counts toward
        // *pSumFreeSize; a used one must be losable or we fail immediately.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple case: the candidate must be a single free suballocation,
        // large enough for the request plus margins.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7492 
    7493 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7494 {
    7495  VMA_ASSERT(item != m_Suballocations.end());
    7496  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7497 
    7498  VmaSuballocationList::iterator nextItem = item;
    7499  ++nextItem;
    7500  VMA_ASSERT(nextItem != m_Suballocations.end());
    7501  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7502 
    7503  item->size += nextItem->size;
    7504  --m_FreeCount;
    7505  m_Suballocations.erase(nextItem);
    7506 }
    7507 
/*
Marks the given suballocation as free, merges it with neighboring free
suballocations (if any), and registers the resulting free range in
m_FreeSuballocationsBySize. Returns an iterator to the resulting (possibly
merged) free suballocation.
*/
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // The next item is about to be absorbed; it must leave the by-size
        // registry first, because its iterator becomes invalid after merge.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // The previous item absorbs this one (possibly already grown by the
        // next-merge above); its registered size changes, so it is
        // re-registered after the merge.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7559 
    7560 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7561 {
    7562  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7563  VMA_ASSERT(item->size > 0);
    7564 
    7565  // You may want to enable this validation at the beginning or at the end of
    7566  // this function, depending on what do you want to check.
    7567  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7568 
    7569  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7570  {
    7571  if(m_FreeSuballocationsBySize.empty())
    7572  {
    7573  m_FreeSuballocationsBySize.push_back(item);
    7574  }
    7575  else
    7576  {
    7577  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7578  }
    7579  }
    7580 
    7581  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7582 }
    7583 
    7584 
    7585 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    7586 {
    7587  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7588  VMA_ASSERT(item->size > 0);
    7589 
    7590  // You may want to enable this validation at the beginning or at the end of
    7591  // this function, depending on what do you want to check.
    7592  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7593 
    7594  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7595  {
    7596  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7597  m_FreeSuballocationsBySize.data(),
    7598  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    7599  item,
    7600  VmaSuballocationItemSizeLess());
    7601  for(size_t index = it - m_FreeSuballocationsBySize.data();
    7602  index < m_FreeSuballocationsBySize.size();
    7603  ++index)
    7604  {
    7605  if(m_FreeSuballocationsBySize[index] == item)
    7606  {
    7607  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    7608  return;
    7609  }
    7610  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    7611  }
    7612  VMA_ASSERT(0 && "Not found.");
    7613  }
    7614 
    7615  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7616 }
    7617 
    7619 // class VmaBlockMetadata_Linear
    7620 
// Constructs empty linear metadata. The actual block size is set later in
// Init(). m_Suballocations0/1 are the two vectors whose "1st"/"2nd" roles are
// presumably selected by m_1stVectorIndex (see AccessSuballocations1st/2nd) -
// confirm against those accessors.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    // Counts of "null items": suballocations already freed but still kept in
    // the vectors as placeholders (hAllocation == VK_NULL_HANDLE).
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7633 
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
    // Nothing to release explicitly; member vectors clean up through their
    // own allocators.
}
    7637 
// Initializes metadata for a block of the given size: base class stores the
// size, and the whole block starts out as free space.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7643 
/*
Consistency check for linear metadata. Verifies structural invariants of both
suballocation vectors (null-item bookkeeping, ordering of offsets with debug
margins, per-allocation offset/size agreement) and that m_SumFreeSize matches
block size minus the sum of used sizes. Returns true if everything holds;
each VMA_VALIDATE fails the whole check otherwise.
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is non-empty exactly when a second-vector mode is active.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // In ring-buffer mode, a non-empty 2nd vector requires a non-empty 1st.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the low part of the block,
    // before the 1st vector; walk it in ascending order first.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // Free items are exactly the null-handle items.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading null items of the 1st vector must all be free placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i starts at m_1stNullItemsBeginCount, so this
        // condition is always true as written - possibly a leftover or a
        // typo; confirm the intended invariant.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the end of the
    // block; iterate it in reverse to keep offsets ascending.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7770 
    7771 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7772 {
    7773  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7774  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7775 }
    7776 
    7777 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7778 {
    7779  const VkDeviceSize size = GetSize();
    7780 
    7781  /*
    7782  We don't consider gaps inside allocation vectors with freed allocations because
    7783  they are not suitable for reuse in linear allocator. We consider only space that
    7784  is available for new allocations.
    7785  */
    7786  if(IsEmpty())
    7787  {
    7788  return size;
    7789  }
    7790 
    7791  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7792 
    7793  switch(m_2ndVectorMode)
    7794  {
    7795  case SECOND_VECTOR_EMPTY:
    7796  /*
    7797  Available space is after end of 1st, as well as before beginning of 1st (which
    7798  whould make it a ring buffer).
    7799  */
    7800  {
    7801  const size_t suballocations1stCount = suballocations1st.size();
    7802  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    7803  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    7804  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    7805  return VMA_MAX(
    7806  firstSuballoc.offset,
    7807  size - (lastSuballoc.offset + lastSuballoc.size));
    7808  }
    7809  break;
    7810 
    7811  case SECOND_VECTOR_RING_BUFFER:
    7812  /*
    7813  Available space is only between end of 2nd and beginning of 1st.
    7814  */
    7815  {
    7816  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7817  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    7818  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    7819  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    7820  }
    7821  break;
    7822 
    7823  case SECOND_VECTOR_DOUBLE_STACK:
    7824  /*
    7825  Available space is only between end of 1st and top of 2nd.
    7826  */
    7827  {
    7828  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7829  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    7830  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    7831  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    7832  }
    7833  break;
    7834 
    7835  default:
    7836  VMA_ASSERT(0);
    7837  return 0;
    7838  }
    7839 }
    7840 
    7841 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7842 {
    7843  const VkDeviceSize size = GetSize();
    7844  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7845  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7846  const size_t suballoc1stCount = suballocations1st.size();
    7847  const size_t suballoc2ndCount = suballocations2nd.size();
    7848 
    7849  outInfo.blockCount = 1;
    7850  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7851  outInfo.unusedRangeCount = 0;
    7852  outInfo.usedBytes = 0;
    7853  outInfo.allocationSizeMin = UINT64_MAX;
    7854  outInfo.allocationSizeMax = 0;
    7855  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7856  outInfo.unusedRangeSizeMax = 0;
    7857 
    7858  VkDeviceSize lastOffset = 0;
    7859 
    7860  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7861  {
    7862  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7863  size_t nextAlloc2ndIndex = 0;
    7864  while(lastOffset < freeSpace2ndTo1stEnd)
    7865  {
    7866  // Find next non-null allocation or move nextAllocIndex to the end.
    7867  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7868  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7869  {
    7870  ++nextAlloc2ndIndex;
    7871  }
    7872 
    7873  // Found non-null allocation.
    7874  if(nextAlloc2ndIndex < suballoc2ndCount)
    7875  {
    7876  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7877 
    7878  // 1. Process free space before this allocation.
    7879  if(lastOffset < suballoc.offset)
    7880  {
    7881  // There is free space from lastOffset to suballoc.offset.
    7882  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7883  ++outInfo.unusedRangeCount;
    7884  outInfo.unusedBytes += unusedRangeSize;
    7885  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7886  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7887  }
    7888 
    7889  // 2. Process this allocation.
    7890  // There is allocation with suballoc.offset, suballoc.size.
    7891  outInfo.usedBytes += suballoc.size;
    7892  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7893  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7894 
    7895  // 3. Prepare for next iteration.
    7896  lastOffset = suballoc.offset + suballoc.size;
    7897  ++nextAlloc2ndIndex;
    7898  }
    7899  // We are at the end.
    7900  else
    7901  {
    7902  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7903  if(lastOffset < freeSpace2ndTo1stEnd)
    7904  {
    7905  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7906  ++outInfo.unusedRangeCount;
    7907  outInfo.unusedBytes += unusedRangeSize;
    7908  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7909  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7910  }
    7911 
    7912  // End of loop.
    7913  lastOffset = freeSpace2ndTo1stEnd;
    7914  }
    7915  }
    7916  }
    7917 
    7918  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7919  const VkDeviceSize freeSpace1stTo2ndEnd =
    7920  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7921  while(lastOffset < freeSpace1stTo2ndEnd)
    7922  {
    7923  // Find next non-null allocation or move nextAllocIndex to the end.
    7924  while(nextAlloc1stIndex < suballoc1stCount &&
    7925  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7926  {
    7927  ++nextAlloc1stIndex;
    7928  }
    7929 
    7930  // Found non-null allocation.
    7931  if(nextAlloc1stIndex < suballoc1stCount)
    7932  {
    7933  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7934 
    7935  // 1. Process free space before this allocation.
    7936  if(lastOffset < suballoc.offset)
    7937  {
    7938  // There is free space from lastOffset to suballoc.offset.
    7939  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7940  ++outInfo.unusedRangeCount;
    7941  outInfo.unusedBytes += unusedRangeSize;
    7942  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7943  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7944  }
    7945 
    7946  // 2. Process this allocation.
    7947  // There is allocation with suballoc.offset, suballoc.size.
    7948  outInfo.usedBytes += suballoc.size;
    7949  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7950  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7951 
    7952  // 3. Prepare for next iteration.
    7953  lastOffset = suballoc.offset + suballoc.size;
    7954  ++nextAlloc1stIndex;
    7955  }
    7956  // We are at the end.
    7957  else
    7958  {
    7959  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7960  if(lastOffset < freeSpace1stTo2ndEnd)
    7961  {
    7962  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7963  ++outInfo.unusedRangeCount;
    7964  outInfo.unusedBytes += unusedRangeSize;
    7965  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7966  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7967  }
    7968 
    7969  // End of loop.
    7970  lastOffset = freeSpace1stTo2ndEnd;
    7971  }
    7972  }
    7973 
    7974  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7975  {
    7976  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7977  while(lastOffset < size)
    7978  {
    7979  // Find next non-null allocation or move nextAllocIndex to the end.
    7980  while(nextAlloc2ndIndex != SIZE_MAX &&
    7981  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7982  {
    7983  --nextAlloc2ndIndex;
    7984  }
    7985 
    7986  // Found non-null allocation.
    7987  if(nextAlloc2ndIndex != SIZE_MAX)
    7988  {
    7989  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7990 
    7991  // 1. Process free space before this allocation.
    7992  if(lastOffset < suballoc.offset)
    7993  {
    7994  // There is free space from lastOffset to suballoc.offset.
    7995  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7996  ++outInfo.unusedRangeCount;
    7997  outInfo.unusedBytes += unusedRangeSize;
    7998  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7999  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8000  }
    8001 
    8002  // 2. Process this allocation.
    8003  // There is allocation with suballoc.offset, suballoc.size.
    8004  outInfo.usedBytes += suballoc.size;
    8005  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8006  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8007 
    8008  // 3. Prepare for next iteration.
    8009  lastOffset = suballoc.offset + suballoc.size;
    8010  --nextAlloc2ndIndex;
    8011  }
    8012  // We are at the end.
    8013  else
    8014  {
    8015  // There is free space from lastOffset to size.
    8016  if(lastOffset < size)
    8017  {
    8018  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8019  ++outInfo.unusedRangeCount;
    8020  outInfo.unusedBytes += unusedRangeSize;
    8021  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8022  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8023  }
    8024 
    8025  // End of loop.
    8026  lastOffset = size;
    8027  }
    8028  }
    8029  }
    8030 
    8031  outInfo.unusedBytes = size - outInfo.usedBytes;
    8032 }
    8033 
    8034 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8035 {
    8036  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8037  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8038  const VkDeviceSize size = GetSize();
    8039  const size_t suballoc1stCount = suballocations1st.size();
    8040  const size_t suballoc2ndCount = suballocations2nd.size();
    8041 
    8042  inoutStats.size += size;
    8043 
    8044  VkDeviceSize lastOffset = 0;
    8045 
    8046  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8047  {
    8048  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8049  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8050  while(lastOffset < freeSpace2ndTo1stEnd)
    8051  {
    8052  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8053  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8054  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8055  {
    8056  ++nextAlloc2ndIndex;
    8057  }
    8058 
    8059  // Found non-null allocation.
    8060  if(nextAlloc2ndIndex < suballoc2ndCount)
    8061  {
    8062  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8063 
    8064  // 1. Process free space before this allocation.
    8065  if(lastOffset < suballoc.offset)
    8066  {
    8067  // There is free space from lastOffset to suballoc.offset.
    8068  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8069  inoutStats.unusedSize += unusedRangeSize;
    8070  ++inoutStats.unusedRangeCount;
    8071  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8072  }
    8073 
    8074  // 2. Process this allocation.
    8075  // There is allocation with suballoc.offset, suballoc.size.
    8076  ++inoutStats.allocationCount;
    8077 
    8078  // 3. Prepare for next iteration.
    8079  lastOffset = suballoc.offset + suballoc.size;
    8080  ++nextAlloc2ndIndex;
    8081  }
    8082  // We are at the end.
    8083  else
    8084  {
    8085  if(lastOffset < freeSpace2ndTo1stEnd)
    8086  {
    8087  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8088  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8089  inoutStats.unusedSize += unusedRangeSize;
    8090  ++inoutStats.unusedRangeCount;
    8091  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8092  }
    8093 
    8094  // End of loop.
    8095  lastOffset = freeSpace2ndTo1stEnd;
    8096  }
    8097  }
    8098  }
    8099 
    8100  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8101  const VkDeviceSize freeSpace1stTo2ndEnd =
    8102  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8103  while(lastOffset < freeSpace1stTo2ndEnd)
    8104  {
    8105  // Find next non-null allocation or move nextAllocIndex to the end.
    8106  while(nextAlloc1stIndex < suballoc1stCount &&
    8107  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8108  {
    8109  ++nextAlloc1stIndex;
    8110  }
    8111 
    8112  // Found non-null allocation.
    8113  if(nextAlloc1stIndex < suballoc1stCount)
    8114  {
    8115  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8116 
    8117  // 1. Process free space before this allocation.
    8118  if(lastOffset < suballoc.offset)
    8119  {
    8120  // There is free space from lastOffset to suballoc.offset.
    8121  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8122  inoutStats.unusedSize += unusedRangeSize;
    8123  ++inoutStats.unusedRangeCount;
    8124  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8125  }
    8126 
    8127  // 2. Process this allocation.
    8128  // There is allocation with suballoc.offset, suballoc.size.
    8129  ++inoutStats.allocationCount;
    8130 
    8131  // 3. Prepare for next iteration.
    8132  lastOffset = suballoc.offset + suballoc.size;
    8133  ++nextAlloc1stIndex;
    8134  }
    8135  // We are at the end.
    8136  else
    8137  {
    8138  if(lastOffset < freeSpace1stTo2ndEnd)
    8139  {
    8140  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8141  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8142  inoutStats.unusedSize += unusedRangeSize;
    8143  ++inoutStats.unusedRangeCount;
    8144  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8145  }
    8146 
    8147  // End of loop.
    8148  lastOffset = freeSpace1stTo2ndEnd;
    8149  }
    8150  }
    8151 
    8152  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8153  {
    8154  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8155  while(lastOffset < size)
    8156  {
    8157  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8158  while(nextAlloc2ndIndex != SIZE_MAX &&
    8159  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8160  {
    8161  --nextAlloc2ndIndex;
    8162  }
    8163 
    8164  // Found non-null allocation.
    8165  if(nextAlloc2ndIndex != SIZE_MAX)
    8166  {
    8167  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8168 
    8169  // 1. Process free space before this allocation.
    8170  if(lastOffset < suballoc.offset)
    8171  {
    8172  // There is free space from lastOffset to suballoc.offset.
    8173  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8174  inoutStats.unusedSize += unusedRangeSize;
    8175  ++inoutStats.unusedRangeCount;
    8176  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8177  }
    8178 
    8179  // 2. Process this allocation.
    8180  // There is allocation with suballoc.offset, suballoc.size.
    8181  ++inoutStats.allocationCount;
    8182 
    8183  // 3. Prepare for next iteration.
    8184  lastOffset = suballoc.offset + suballoc.size;
    8185  --nextAlloc2ndIndex;
    8186  }
    8187  // We are at the end.
    8188  else
    8189  {
    8190  if(lastOffset < size)
    8191  {
    8192  // There is free space from lastOffset to size.
    8193  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8194  inoutStats.unusedSize += unusedRangeSize;
    8195  ++inoutStats.unusedRangeCount;
    8196  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8197  }
    8198 
    8199  // End of loop.
    8200  lastOffset = size;
    8201  }
    8202  }
    8203  }
    8204 }
    8205 
#if VMA_STATS_STRING_ENABLED
/*
Writes a detailed JSON map of this block's layout — every live allocation and
every unused range — through `json`.

The method makes two passes over the same traversal order:
- FIRST PASS only counts (allocation counts, used bytes, number of unused
  ranges), because PrintDetailedMap_Begin() needs those totals up front.
- SECOND PASS repeats the identical traversal and emits one JSON entry per
  allocation (PrintDetailedMap_Allocation) or unused range
  (PrintDetailedMap_UnusedRange), followed by PrintDetailedMap_End().

Traversal order in each pass:
1. Ring-buffer mode: 2nd-vector allocations, which occupy offsets
   [0, offset of first non-null 1st item).
2. The 1st vector, up to the end of the block — or up to
   suballocations2nd.back().offset in double-stack mode.
3. Double-stack mode: the 2nd vector (upper stack) walked backwards from its
   last element down to index 0 (the index wraps to SIZE_MAX to terminate).
Null (lost) allocations are skipped; gaps between live allocations are
reported as unused ranges. `lastOffset` carries the end of the last processed
range across all sections of a pass.
*/
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Walk the 1st vector, skipping its leading null items.
    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): this guard tests `size`, while the matching branch
            // in the second pass tests `freeSpace1stTo2ndEnd`. The two are
            // equivalent here, because the enclosing while guarantees
            // lastOffset < freeSpace1stTo2ndEnd <= size — but the asymmetry is
            // worth unifying for readability.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Upper stack walked backwards; index wraps to SIZE_MAX to terminate.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    // Totals are known now; open the JSON object with them.
    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS — same traversal, now emitting JSON entries.
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Reuses nextAlloc1stIndex and freeSpace1stTo2ndEnd from the first pass.
    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Upper stack walked backwards; index wraps to SIZE_MAX to terminate.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
#endif // #if VMA_STATS_STRING_ENABLED
    8522 
    8523 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8524  uint32_t currentFrameIndex,
    8525  uint32_t frameInUseCount,
    8526  VkDeviceSize bufferImageGranularity,
    8527  VkDeviceSize allocSize,
    8528  VkDeviceSize allocAlignment,
    8529  bool upperAddress,
    8530  VmaSuballocationType allocType,
    8531  bool canMakeOtherLost,
    8532  uint32_t strategy,
    8533  VmaAllocationRequest* pAllocationRequest)
    8534 {
    8535  VMA_ASSERT(allocSize > 0);
    8536  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8537  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8538  VMA_HEAVY_ASSERT(Validate());
    8539 
    8540  const VkDeviceSize size = GetSize();
    8541  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8542  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8543 
    8544  if(upperAddress)
    8545  {
    8546  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8547  {
    8548  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8549  return false;
    8550  }
    8551 
    8552  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8553  if(allocSize > size)
    8554  {
    8555  return false;
    8556  }
    8557  VkDeviceSize resultBaseOffset = size - allocSize;
    8558  if(!suballocations2nd.empty())
    8559  {
    8560  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8561  resultBaseOffset = lastSuballoc.offset - allocSize;
    8562  if(allocSize > lastSuballoc.offset)
    8563  {
    8564  return false;
    8565  }
    8566  }
    8567 
    8568  // Start from offset equal to end of free space.
    8569  VkDeviceSize resultOffset = resultBaseOffset;
    8570 
    8571  // Apply VMA_DEBUG_MARGIN at the end.
    8572  if(VMA_DEBUG_MARGIN > 0)
    8573  {
    8574  if(resultOffset < VMA_DEBUG_MARGIN)
    8575  {
    8576  return false;
    8577  }
    8578  resultOffset -= VMA_DEBUG_MARGIN;
    8579  }
    8580 
    8581  // Apply alignment.
    8582  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8583 
    8584  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8585  // Make bigger alignment if necessary.
    8586  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8587  {
    8588  bool bufferImageGranularityConflict = false;
    8589  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8590  {
    8591  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8592  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8593  {
    8594  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8595  {
    8596  bufferImageGranularityConflict = true;
    8597  break;
    8598  }
    8599  }
    8600  else
    8601  // Already on previous page.
    8602  break;
    8603  }
    8604  if(bufferImageGranularityConflict)
    8605  {
    8606  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8607  }
    8608  }
    8609 
    8610  // There is enough free space.
    8611  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8612  suballocations1st.back().offset + suballocations1st.back().size :
    8613  0;
    8614  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8615  {
    8616  // Check previous suballocations for BufferImageGranularity conflicts.
    8617  // If conflict exists, allocation cannot be made here.
    8618  if(bufferImageGranularity > 1)
    8619  {
    8620  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8621  {
    8622  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8623  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8624  {
    8625  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8626  {
    8627  return false;
    8628  }
    8629  }
    8630  else
    8631  {
    8632  // Already on next page.
    8633  break;
    8634  }
    8635  }
    8636  }
    8637 
    8638  // All tests passed: Success.
    8639  pAllocationRequest->offset = resultOffset;
    8640  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8641  pAllocationRequest->sumItemSize = 0;
    8642  // pAllocationRequest->item unused.
    8643  pAllocationRequest->itemsToMakeLostCount = 0;
    8644  return true;
    8645  }
    8646  }
    8647  else // !upperAddress
    8648  {
    8649  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8650  {
    8651  // Try to allocate at the end of 1st vector.
    8652 
    8653  VkDeviceSize resultBaseOffset = 0;
    8654  if(!suballocations1st.empty())
    8655  {
    8656  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8657  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8658  }
    8659 
    8660  // Start from offset equal to beginning of free space.
    8661  VkDeviceSize resultOffset = resultBaseOffset;
    8662 
    8663  // Apply VMA_DEBUG_MARGIN at the beginning.
    8664  if(VMA_DEBUG_MARGIN > 0)
    8665  {
    8666  resultOffset += VMA_DEBUG_MARGIN;
    8667  }
    8668 
    8669  // Apply alignment.
    8670  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8671 
    8672  // Check previous suballocations for BufferImageGranularity conflicts.
    8673  // Make bigger alignment if necessary.
    8674  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8675  {
    8676  bool bufferImageGranularityConflict = false;
    8677  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8678  {
    8679  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8680  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8681  {
    8682  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8683  {
    8684  bufferImageGranularityConflict = true;
    8685  break;
    8686  }
    8687  }
    8688  else
    8689  // Already on previous page.
    8690  break;
    8691  }
    8692  if(bufferImageGranularityConflict)
    8693  {
    8694  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8695  }
    8696  }
    8697 
    8698  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8699  suballocations2nd.back().offset : size;
    8700 
    8701  // There is enough free space at the end after alignment.
    8702  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8703  {
    8704  // Check next suballocations for BufferImageGranularity conflicts.
    8705  // If conflict exists, allocation cannot be made here.
    8706  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8707  {
    8708  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8709  {
    8710  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8711  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8712  {
    8713  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8714  {
    8715  return false;
    8716  }
    8717  }
    8718  else
    8719  {
    8720  // Already on previous page.
    8721  break;
    8722  }
    8723  }
    8724  }
    8725 
    8726  // All tests passed: Success.
    8727  pAllocationRequest->offset = resultOffset;
    8728  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8729  pAllocationRequest->sumItemSize = 0;
    8730  // pAllocationRequest->item unused.
    8731  pAllocationRequest->itemsToMakeLostCount = 0;
    8732  return true;
    8733  }
    8734  }
    8735 
    8736  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8737  // beginning of 1st vector as the end of free space.
    8738  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8739  {
    8740  VMA_ASSERT(!suballocations1st.empty());
    8741 
    8742  VkDeviceSize resultBaseOffset = 0;
    8743  if(!suballocations2nd.empty())
    8744  {
    8745  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8746  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8747  }
    8748 
    8749  // Start from offset equal to beginning of free space.
    8750  VkDeviceSize resultOffset = resultBaseOffset;
    8751 
    8752  // Apply VMA_DEBUG_MARGIN at the beginning.
    8753  if(VMA_DEBUG_MARGIN > 0)
    8754  {
    8755  resultOffset += VMA_DEBUG_MARGIN;
    8756  }
    8757 
    8758  // Apply alignment.
    8759  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8760 
    8761  // Check previous suballocations for BufferImageGranularity conflicts.
    8762  // Make bigger alignment if necessary.
    8763  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8764  {
    8765  bool bufferImageGranularityConflict = false;
    8766  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8767  {
    8768  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8769  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8770  {
    8771  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8772  {
    8773  bufferImageGranularityConflict = true;
    8774  break;
    8775  }
    8776  }
    8777  else
    8778  // Already on previous page.
    8779  break;
    8780  }
    8781  if(bufferImageGranularityConflict)
    8782  {
    8783  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8784  }
    8785  }
    8786 
    8787  pAllocationRequest->itemsToMakeLostCount = 0;
    8788  pAllocationRequest->sumItemSize = 0;
    8789  size_t index1st = m_1stNullItemsBeginCount;
    8790 
    8791  if(canMakeOtherLost)
    8792  {
    8793  while(index1st < suballocations1st.size() &&
    8794  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8795  {
    8796  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8797  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8798  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8799  {
    8800  // No problem.
    8801  }
    8802  else
    8803  {
    8804  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8805  if(suballoc.hAllocation->CanBecomeLost() &&
    8806  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8807  {
    8808  ++pAllocationRequest->itemsToMakeLostCount;
    8809  pAllocationRequest->sumItemSize += suballoc.size;
    8810  }
    8811  else
    8812  {
    8813  return false;
    8814  }
    8815  }
    8816  ++index1st;
    8817  }
    8818 
    8819  // Check next suballocations for BufferImageGranularity conflicts.
    8820  // If conflict exists, we must mark more allocations lost or fail.
    8821  if(bufferImageGranularity > 1)
    8822  {
    8823  while(index1st < suballocations1st.size())
    8824  {
    8825  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8826  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    8827  {
    8828  if(suballoc.hAllocation != VK_NULL_HANDLE)
    8829  {
    8830  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    8831  if(suballoc.hAllocation->CanBecomeLost() &&
    8832  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8833  {
    8834  ++pAllocationRequest->itemsToMakeLostCount;
    8835  pAllocationRequest->sumItemSize += suballoc.size;
    8836  }
    8837  else
    8838  {
    8839  return false;
    8840  }
    8841  }
    8842  }
    8843  else
    8844  {
    8845  // Already on next page.
    8846  break;
    8847  }
    8848  ++index1st;
    8849  }
    8850  }
    8851  }
    8852 
    8853  // There is enough free space at the end after alignment.
    8854  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    8855  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    8856  {
    8857  // Check next suballocations for BufferImageGranularity conflicts.
    8858  // If conflict exists, allocation cannot be made here.
    8859  if(bufferImageGranularity > 1)
    8860  {
    8861  for(size_t nextSuballocIndex = index1st;
    8862  nextSuballocIndex < suballocations1st.size();
    8863  nextSuballocIndex++)
    8864  {
    8865  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    8866  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8867  {
    8868  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8869  {
    8870  return false;
    8871  }
    8872  }
    8873  else
    8874  {
    8875  // Already on next page.
    8876  break;
    8877  }
    8878  }
    8879  }
    8880 
    8881  // All tests passed: Success.
    8882  pAllocationRequest->offset = resultOffset;
    8883  pAllocationRequest->sumFreeSize =
    8884  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    8885  - resultBaseOffset
    8886  - pAllocationRequest->sumItemSize;
    8887  // pAllocationRequest->item unused.
    8888  return true;
    8889  }
    8890  }
    8891  }
    8892 
    8893  return false;
    8894 }
    8895 
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Makes lost the number of allocations that CreateAllocationRequest counted
    // in pAllocationRequest->itemsToMakeLostCount, scanning the 1st vector from
    // its first non-null item. Returns false if any of them can no longer be
    // made lost, in which case the request must be abandoned.
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // Free items are skipped; only used items count towards madeLostCount.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a null item and account the freed space.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    8940 
    8941 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8942 {
    8943  uint32_t lostAllocationCount = 0;
    8944 
    8945  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8946  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8947  {
    8948  VmaSuballocation& suballoc = suballocations1st[i];
    8949  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8950  suballoc.hAllocation->CanBecomeLost() &&
    8951  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8952  {
    8953  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8954  suballoc.hAllocation = VK_NULL_HANDLE;
    8955  ++m_1stNullItemsMiddleCount;
    8956  m_SumFreeSize += suballoc.size;
    8957  ++lostAllocationCount;
    8958  }
    8959  }
    8960 
    8961  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8962  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8963  {
    8964  VmaSuballocation& suballoc = suballocations2nd[i];
    8965  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8966  suballoc.hAllocation->CanBecomeLost() &&
    8967  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8968  {
    8969  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8970  suballoc.hAllocation = VK_NULL_HANDLE;
    8971  ++m_2ndNullItemsCount;
    8972  ++lostAllocationCount;
    8973  }
    8974  }
    8975 
    8976  if(lostAllocationCount)
    8977  {
    8978  CleanupAfterFree();
    8979  }
    8980 
    8981  return lostAllocationCount;
    8982 }
    8983 
    8984 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8985 {
    8986  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8987  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8988  {
    8989  const VmaSuballocation& suballoc = suballocations1st[i];
    8990  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8991  {
    8992  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    8993  {
    8994  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8995  return VK_ERROR_VALIDATION_FAILED_EXT;
    8996  }
    8997  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    8998  {
    8999  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9000  return VK_ERROR_VALIDATION_FAILED_EXT;
    9001  }
    9002  }
    9003  }
    9004 
    9005  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9006  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9007  {
    9008  const VmaSuballocation& suballoc = suballocations2nd[i];
    9009  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9010  {
    9011  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9012  {
    9013  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9014  return VK_ERROR_VALIDATION_FAILED_EXT;
    9015  }
    9016  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9017  {
    9018  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9019  return VK_ERROR_VALIDATION_FAILED_EXT;
    9020  }
    9021  }
    9022  }
    9023 
    9024  return VK_SUCCESS;
    9025 }
    9026 
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Commits a suballocation at the offset chosen by CreateAllocationRequest,
    // appending it to the 1st or 2nd vector and switching the 2nd-vector mode
    // (ring buffer vs. double stack) as needed.
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Allocation from the top of the block: goes to the 2nd vector, which
        // becomes the upper part of a double stack.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The offset matches neither the end of the 1st vector nor the
                // ring-buffer tail: CreateAllocationRequest should never
                // produce such a request.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9096 
    9097 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9098 {
    9099  FreeAtOffset(allocation->GetOffset());
    9100 }
    9101 
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    // Frees the suballocation starting at the given offset. Cheap O(1) cases
    // (first item of 1st vector, last item of 2nd or 1st vector) are tried
    // first; otherwise the item is located by binary search in the middle of
    // the appropriate vector and turned into a null item.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // (m_1stNullItemsBeginCount indexes the first non-null item; presumably
        // always < size() when the vector is non-empty - invariant maintained
        // by CleanupAfterFree() - TODO confirm.)
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as a null item; physical removal is deferred to CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The comparator must match the vector's sort order: ascending offsets
        // in ring-buffer mode, descending in double-stack mode.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9190 
    9191 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9192 {
    9193  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9194  const size_t suballocCount = AccessSuballocations1st().size();
    9195  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9196 }
    9197 
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    // Restores internal invariants after items were freed or made lost:
    // absorbs null items at the ends of both vectors, optionally compacts the
    // 1st vector, and swaps the vectors when the 1st one becomes empty while
    // the 2nd (ring-buffer) one is not.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations remain - reset to pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact the 1st vector in place: shift all non-null items to the
            // front, then shrink, dropping every null item.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading null items of the new 1st vector migrate from the
                // "middle" counter into the "begin" counter.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9294 
    9295 
    9297 // class VmaBlockMetadata_Buddy
    9298 
// Constructs an empty buddy metadata object. The actual block size and the
// root node are set up later in Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // No free nodes registered at any level yet.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9308 
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    // Recursively deletes the whole node tree.
    DeleteNode(m_Root);
}
    9313 
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    // Only the largest power-of-2 prefix of the block is managed by the buddy
    // system; the remainder (GetUnusableSize()) is never allocated from.
    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount: one level per halving of the node size, down to
    // MIN_NODE_SIZE, capped at MAX_LEVELS.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // Create the root node covering the whole usable size and register it as
    // the single free node at level 0.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    9338 
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Cross-checks the node tree, the per-level free lists, and the cached
    // counters (m_AllocationCount, m_SumFreeSize) against each other.

    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            // Forward/backward links must be consistent; the last node must be
            // the list's cached back pointer.
            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9381 
    9382 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9383 {
    9384  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9385  {
    9386  if(m_FreeList[level].front != VMA_NULL)
    9387  {
    9388  return LevelToNodeSize(level);
    9389  }
    9390  }
    9391  return 0;
    9392 }
    9393 
    9394 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9395 {
    9396  const VkDeviceSize unusableSize = GetUnusableSize();
    9397 
    9398  outInfo.blockCount = 1;
    9399 
    9400  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9401  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9402 
    9403  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9404  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9405  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9406 
    9407  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9408 
    9409  if(unusableSize > 0)
    9410  {
    9411  ++outInfo.unusedRangeCount;
    9412  outInfo.unusedBytes += unusableSize;
    9413  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9414  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9415  }
    9416 }
    9417 
    9418 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9419 {
    9420  const VkDeviceSize unusableSize = GetUnusableSize();
    9421 
    9422  inoutStats.size += GetSize();
    9423  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9424  inoutStats.allocationCount += m_AllocationCount;
    9425  inoutStats.unusedRangeCount += m_FreeCount;
    9426  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9427 
    9428  if(unusableSize > 0)
    9429  {
    9430  ++inoutStats.unusedRangeCount;
    9431  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9432  }
    9433 }
    9434 
#if VMA_STATS_STRING_ENABLED

void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // Dumps this block's layout as JSON.
    // TODO optimize
    // Full statistics are gathered first because PrintDetailedMap_Begin needs
    // the totals up front.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    // Emit every node of the tree recursively, starting at the root.
    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    // The non-power-of-2 tail of the block is reported as one unused range.
    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}

#endif // #if VMA_STATS_STRING_ENABLED
    9463 
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    // Searches the free lists for a suitably aligned free node large enough
    // for allocSize. currentFrameIndex, frameInUseCount, canMakeOtherLost and
    // strategy are ignored by this implementation: lost allocations are not
    // supported here and the first fitting node is always taken.
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Walk from targetLevel (smallest node size that fits) towards level 0
    // (larger nodes), taking the first free node satisfying the alignment.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // The chosen node's level is carried to Alloc() via customData.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9514 
    9515 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9516  uint32_t currentFrameIndex,
    9517  uint32_t frameInUseCount,
    9518  VmaAllocationRequest* pAllocationRequest)
    9519 {
    9520  /*
    9521  Lost allocations are not supported in buddy allocator at the moment.
    9522  Support might be added in the future.
    9523  */
    9524  return pAllocationRequest->itemsToMakeLostCount == 0;
    9525 }
    9526 
    9527 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9528 {
    9529  /*
    9530  Lost allocations are not supported in buddy allocator at the moment.
    9531  Support might be added in the future.
    9532  */
    9533  return 0;
    9534 }
    9535 
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Commits the allocation prepared by CreateAllocationRequest: finds the
    // free node at request.offset (its level travels in request.customData),
    // splits it down to the target level if needed, and converts the final
    // node into an allocation node.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the chosen node in its level's free list by offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Left child must end up at the list's front so that the
        // m_FreeList[currLevel].front read below picks it up.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9610 
    9611 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9612 {
    9613  if(node->type == Node::TYPE_SPLIT)
    9614  {
    9615  DeleteNode(node->split.leftChild->buddy);
    9616  DeleteNode(node->split.leftChild);
    9617  }
    9618 
    9619  vma_delete(GetAllocationCallbacks(), node);
    9620 }
    9621 
    9622 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9623 {
    9624  VMA_VALIDATE(level < m_LevelCount);
    9625  VMA_VALIDATE(curr->parent == parent);
    9626  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9627  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9628  switch(curr->type)
    9629  {
    9630  case Node::TYPE_FREE:
    9631  // curr->free.prev, next are validated separately.
    9632  ctx.calculatedSumFreeSize += levelNodeSize;
    9633  ++ctx.calculatedFreeCount;
    9634  break;
    9635  case Node::TYPE_ALLOCATION:
    9636  ++ctx.calculatedAllocationCount;
    9637  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9638  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9639  break;
    9640  case Node::TYPE_SPLIT:
    9641  {
    9642  const uint32_t childrenLevel = level + 1;
    9643  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9644  const Node* const leftChild = curr->split.leftChild;
    9645  VMA_VALIDATE(leftChild != VMA_NULL);
    9646  VMA_VALIDATE(leftChild->offset == curr->offset);
    9647  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9648  {
    9649  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9650  }
    9651  const Node* const rightChild = leftChild->buddy;
    9652  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9653  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9654  {
    9655  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9656  }
    9657  }
    9658  break;
    9659  default:
    9660  return false;
    9661  }
    9662 
    9663  return true;
    9664 }
    9665 
    9666 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9667 {
    9668  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9669  uint32_t level = 0;
    9670  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9671  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9672  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9673  {
    9674  ++level;
    9675  currLevelNodeSize = nextLevelNodeSize;
    9676  nextLevelNodeSize = currLevelNodeSize >> 1;
    9677  }
    9678  return level;
    9679 }
    9680 
// Frees the allocation that occupies `offset`: locates its leaf node by
// walking down from the root, marks it free, and merges it with its buddy
// up the tree as far as possible.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level: follow the child whose address range contains
    // `offset` until we reach a non-split (leaf) node.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    // NOTE(review): the assert below tolerates alloc == VK_NULL_HANDLE, yet
    // alloc->GetSize() further down dereferences it unconditionally — confirm
    // callers never actually pass VK_NULL_HANDLE here.
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible: while this node's buddy is also free,
    // replace both children with their (now free) parent and go up a level.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        // Merging two free children into one free parent: net free-node
        // count decreases by one.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9731 
    9732 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9733 {
    9734  switch(node->type)
    9735  {
    9736  case Node::TYPE_FREE:
    9737  ++outInfo.unusedRangeCount;
    9738  outInfo.unusedBytes += levelNodeSize;
    9739  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9740  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9741  break;
    9742  case Node::TYPE_ALLOCATION:
    9743  {
    9744  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9745  ++outInfo.allocationCount;
    9746  outInfo.usedBytes += allocSize;
    9747  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9748  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9749 
    9750  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9751  if(unusedRangeSize > 0)
    9752  {
    9753  ++outInfo.unusedRangeCount;
    9754  outInfo.unusedBytes += unusedRangeSize;
    9755  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9756  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9757  }
    9758  }
    9759  break;
    9760  case Node::TYPE_SPLIT:
    9761  {
    9762  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9763  const Node* const leftChild = node->split.leftChild;
    9764  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9765  const Node* const rightChild = leftChild->buddy;
    9766  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9767  }
    9768  break;
    9769  default:
    9770  VMA_ASSERT(0);
    9771  }
    9772 }
    9773 
    9774 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9775 {
    9776  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9777 
    9778  // List is empty.
    9779  Node* const frontNode = m_FreeList[level].front;
    9780  if(frontNode == VMA_NULL)
    9781  {
    9782  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9783  node->free.prev = node->free.next = VMA_NULL;
    9784  m_FreeList[level].front = m_FreeList[level].back = node;
    9785  }
    9786  else
    9787  {
    9788  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9789  node->free.prev = VMA_NULL;
    9790  node->free.next = frontNode;
    9791  frontNode->free.prev = node;
    9792  m_FreeList[level].front = node;
    9793  }
    9794 }
    9795 
    9796 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9797 {
    9798  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9799 
    9800  // It is at the front.
    9801  if(node->free.prev == VMA_NULL)
    9802  {
    9803  VMA_ASSERT(m_FreeList[level].front == node);
    9804  m_FreeList[level].front = node->free.next;
    9805  }
    9806  else
    9807  {
    9808  Node* const prevFreeNode = node->free.prev;
    9809  VMA_ASSERT(prevFreeNode->free.next == node);
    9810  prevFreeNode->free.next = node->free.next;
    9811  }
    9812 
    9813  // It is at the back.
    9814  if(node->free.next == VMA_NULL)
    9815  {
    9816  VMA_ASSERT(m_FreeList[level].back == node);
    9817  m_FreeList[level].back = node->free.prev;
    9818  }
    9819  else
    9820  {
    9821  Node* const nextFreeNode = node->free.next;
    9822  VMA_ASSERT(nextFreeNode->free.prev == node);
    9823  nextFreeNode->free.prev = node->free.prev;
    9824  }
    9825 }
    9826 
    9827 #if VMA_STATS_STRING_ENABLED
    9828 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    9829 {
    9830  switch(node->type)
    9831  {
    9832  case Node::TYPE_FREE:
    9833  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    9834  break;
    9835  case Node::TYPE_ALLOCATION:
    9836  {
    9837  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    9838  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9839  if(allocSize < levelNodeSize)
    9840  {
    9841  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    9842  }
    9843  }
    9844  break;
    9845  case Node::TYPE_SPLIT:
    9846  {
    9847  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9848  const Node* const leftChild = node->split.leftChild;
    9849  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    9850  const Node* const rightChild = leftChild->buddy;
    9851  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    9852  }
    9853  break;
    9854  default:
    9855  VMA_ASSERT(0);
    9856  }
    9857 }
    9858 #endif // #if VMA_STATS_STRING_ENABLED
    9859 
    9860 
    9862 // class VmaDeviceMemoryBlock
    9863 
// Constructs an empty, uninitialized block: all handles null/invalid until
// Init() is called. (hAllocator parameter is currently unused.)
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    9873 
    9874 void VmaDeviceMemoryBlock::Init(
    9875  VmaAllocator hAllocator,
    9876  uint32_t newMemoryTypeIndex,
    9877  VkDeviceMemory newMemory,
    9878  VkDeviceSize newSize,
    9879  uint32_t id,
    9880  uint32_t algorithm)
    9881 {
    9882  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9883 
    9884  m_MemoryTypeIndex = newMemoryTypeIndex;
    9885  m_Id = id;
    9886  m_hMemory = newMemory;
    9887 
    9888  switch(algorithm)
    9889  {
    9891  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9892  break;
    9894  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9895  break;
    9896  default:
    9897  VMA_ASSERT(0);
    9898  // Fall-through.
    9899  case 0:
    9900  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9901  }
    9902  m_pMetadata->Init(newSize);
    9903 }
    9904 
// Returns the VkDeviceMemory to the allocator and destroys the metadata.
// Must only be called when every allocation inside the block has been freed.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    9918 
    9919 bool VmaDeviceMemoryBlock::Validate() const
    9920 {
    9921  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    9922  (m_pMetadata->GetSize() != 0));
    9923 
    9924  return m_pMetadata->Validate();
    9925 }
    9926 
    9927 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9928 {
    9929  void* pData = nullptr;
    9930  VkResult res = Map(hAllocator, 1, &pData);
    9931  if(res != VK_SUCCESS)
    9932  {
    9933  return res;
    9934  }
    9935 
    9936  res = m_pMetadata->CheckCorruption(pData);
    9937 
    9938  Unmap(hAllocator, 1);
    9939 
    9940  return res;
    9941 }
    9942 
    9943 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    9944 {
    9945  if(count == 0)
    9946  {
    9947  return VK_SUCCESS;
    9948  }
    9949 
    9950  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9951  if(m_MapCount != 0)
    9952  {
    9953  m_MapCount += count;
    9954  VMA_ASSERT(m_pMappedData != VMA_NULL);
    9955  if(ppData != VMA_NULL)
    9956  {
    9957  *ppData = m_pMappedData;
    9958  }
    9959  return VK_SUCCESS;
    9960  }
    9961  else
    9962  {
    9963  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    9964  hAllocator->m_hDevice,
    9965  m_hMemory,
    9966  0, // offset
    9967  VK_WHOLE_SIZE,
    9968  0, // flags
    9969  &m_pMappedData);
    9970  if(result == VK_SUCCESS)
    9971  {
    9972  if(ppData != VMA_NULL)
    9973  {
    9974  *ppData = m_pMappedData;
    9975  }
    9976  m_MapCount = count;
    9977  }
    9978  return result;
    9979  }
    9980 }
    9981 
    9982 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    9983 {
    9984  if(count == 0)
    9985  {
    9986  return;
    9987  }
    9988 
    9989  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9990  if(m_MapCount >= count)
    9991  {
    9992  m_MapCount -= count;
    9993  if(m_MapCount == 0)
    9994  {
    9995  m_pMappedData = VMA_NULL;
    9996  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    9997  }
    9998  }
    9999  else
    10000  {
    10001  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10002  }
    10003 }
    10004 
    10005 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10006 {
    10007  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10008  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10009 
    10010  void* pData;
    10011  VkResult res = Map(hAllocator, 1, &pData);
    10012  if(res != VK_SUCCESS)
    10013  {
    10014  return res;
    10015  }
    10016 
    10017  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10018  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10019 
    10020  Unmap(hAllocator, 1);
    10021 
    10022  return VK_SUCCESS;
    10023 }
    10024 
    10025 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10026 {
    10027  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10028  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10029 
    10030  void* pData;
    10031  VkResult res = Map(hAllocator, 1, &pData);
    10032  if(res != VK_SUCCESS)
    10033  {
    10034  return res;
    10035  }
    10036 
    10037  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10038  {
    10039  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10040  }
    10041  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10042  {
    10043  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10044  }
    10045 
    10046  Unmap(hAllocator, 1);
    10047 
    10048  return VK_SUCCESS;
    10049 }
    10050 
// Binds hBuffer to this block's VkDeviceMemory at hAllocation's offset.
// Returns the result of vkBindBufferMemory.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    // The allocation must be a block (sub)allocation that lives in this block.
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}
    10066 
// Binds hImage to this block's VkDeviceMemory at hAllocation's offset.
// Returns the result of vkBindImageMemory.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkImage hImage)
{
    // The allocation must be a block (sub)allocation that lives in this block.
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindImageMemory(
        hAllocator->m_hDevice,
        hImage,
        m_hMemory,
        hAllocation->GetOffset());
}
    10082 
    10083 static void InitStatInfo(VmaStatInfo& outInfo)
    10084 {
    10085  memset(&outInfo, 0, sizeof(outInfo));
    10086  outInfo.allocationSizeMin = UINT64_MAX;
    10087  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10088 }
    10089 
    10090 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10091 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10092 {
    10093  inoutInfo.blockCount += srcInfo.blockCount;
    10094  inoutInfo.allocationCount += srcInfo.allocationCount;
    10095  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10096  inoutInfo.usedBytes += srcInfo.usedBytes;
    10097  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10098  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10099  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10100  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10101  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10102 }
    10103 
    10104 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10105 {
    10106  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10107  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10108  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10109  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10110 }
    10111 
// Creates the custom pool's underlying block vector from the user's create
// info. When createInfo.blockSize is 0, the allocator-provided
// preferredBlockSize is used and the size is treated as non-explicit.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10130 
// Trivial: the m_BlockVector member's destructor releases all blocks.
VmaPool_T::~VmaPool_T()
{
}
    10134 
    10135 #if VMA_STATS_STRING_ENABLED
    10136 
    10137 #endif // #if VMA_STATS_STRING_ENABLED
    10138 
// Stores the configuration for this vector of device memory blocks.
// No blocks are created here; see CreateMinBlocks() and Allocate().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10166 
    10167 VmaBlockVector::~VmaBlockVector()
    10168 {
    10169  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10170 
    10171  for(size_t i = m_Blocks.size(); i--; )
    10172  {
    10173  m_Blocks[i]->Destroy(m_hAllocator);
    10174  vma_delete(m_hAllocator, m_Blocks[i]);
    10175  }
    10176 }
    10177 
    10178 VkResult VmaBlockVector::CreateMinBlocks()
    10179 {
    10180  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10181  {
    10182  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10183  if(res != VK_SUCCESS)
    10184  {
    10185  return res;
    10186  }
    10187  }
    10188  return VK_SUCCESS;
    10189 }
    10190 
    10191 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10192 {
    10193  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10194 
    10195  const size_t blockCount = m_Blocks.size();
    10196 
    10197  pStats->size = 0;
    10198  pStats->unusedSize = 0;
    10199  pStats->allocationCount = 0;
    10200  pStats->unusedRangeCount = 0;
    10201  pStats->unusedRangeSizeMax = 0;
    10202  pStats->blockCount = blockCount;
    10203 
    10204  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10205  {
    10206  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10207  VMA_ASSERT(pBlock);
    10208  VMA_HEAVY_ASSERT(pBlock->Validate());
    10209  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10210  }
    10211 }
    10212 
    10213 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10214 {
    10215  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10216  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10217  (VMA_DEBUG_MARGIN > 0) &&
    10218  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10219 }
    10220 
// Maximum number of retries of the make-other-allocations-lost loop in
// VmaBlockVector::Allocate before giving up with VK_ERROR_TOO_MANY_OBJECTS.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10222 
/*
Allocates `size` bytes with `alignment` from this block vector.

Order of attempts:
1. Sub-allocate from an existing block without making other allocations lost.
2. Create a new VkDeviceMemory block (with adaptive sizing when the block
   size is not explicit) and allocate from it.
3. If permitted, retry against existing blocks while making lost-able
   allocations lost (bounded by VMA_ALLOCATION_TRY_COUNT).

On success fills *pAllocation and returns VK_SUCCESS; otherwise returns an
error code (VK_ERROR_OUT_OF_DEVICE_MEMORY, VK_ERROR_FEATURE_NOT_PRESENT, or
VK_ERROR_TOO_MANY_OBJECTS).

NOTE(review): several lines of this function appear to have been dropped in
this copy (marked inline below); the code is kept as-is — verify against the
original sources before building.
*/
VkResult VmaBlockVector::Allocate(
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    // Decode the relevant creation flags up front.
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    // Which in turn is available only when maxBlockCount = 1.
    if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    {
        canMakeOtherLost = false;
    }

    // Upper address can only be used with linear allocator and within single memory block.
    if(isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Validate strategy.
    // NOTE(review): the case labels for the explicit strategy bits appear to
    // have been dropped here (only `case 0` remains, with a duplicated
    // `break;`) — verify against the original sources.
    switch(strategy)
    {
    case 0:
        break;
        break;
    default:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger that maximum block size for this block vector.
    if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    /*
    Under certain condition, this whole section can be skipped for optimization, so
    we move on directly to trying to allocate with canMakeOtherLost. That's the case
    e.g. for custom pools with linear algorithm.
    */
    if(!canMakeOtherLost || canCreateNewBlock)
    {
        // 1. Search existing allocations. Try to allocate without making other allocations lost.
        VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;

        if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
        {
            // Use only last block.
            if(!m_Blocks.empty())
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(
                    pCurrBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
                    return VK_SUCCESS;
                }
            }
        }
        else
        {
            // NOTE(review): a strategy-selecting `if(...)` condition appears
            // to be missing before this opening brace (its matching
            // `else // WORST_FIT, FIRST_FIT` survives below) — verify.
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
        }

        // 2. Try to create new block.
        if(canCreateNewBlock)
        {
            // Calculate optimal size for new block.
            VkDeviceSize newBlockSize = m_PreferredBlockSize;
            uint32_t newBlockSizeShift = 0;
            const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

            if(!m_ExplicitBlockSize)
            {
                // Allocate 1/8, 1/4, 1/2 as first blocks.
                const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
                for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
                {
                    // Shrink the candidate while it still exceeds every
                    // existing block and leaves room for twice the request.
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                    }
                    else
                    {
                        break;
                    }
                }
            }

            size_t newBlockIndex = 0;
            VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
            // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
            if(!m_ExplicitBlockSize)
            {
                while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize >= size)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                        res = CreateBlock(newBlockSize, &newBlockIndex);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            if(res == VK_SUCCESS)
            {
                VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
                VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

                res = AllocateFromBlock(
                    pBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
                    return VK_SUCCESS;
                }
                else
                {
                    // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
            }
        }
    }

    // 3. Try to allocate from existing blocks with making other allocations lost.
    if(canMakeOtherLost)
    {
        uint32_t tryIndex = 0;
        for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
        {
            VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
            VmaAllocationRequest bestRequest = {};
            VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;

            // 1. Search existing allocations.
            // NOTE(review): a strategy-selecting `if(...)` condition appears
            // to be missing before this opening brace — verify.
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        // Keep the cheapest request (cost = what must be
                        // made lost to satisfy it); cost 0 is optimal.
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost)
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            if(bestRequestCost == 0)
                            {
                                break;
                            }
                        }
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        // NOTE(review): the tail of this `||` condition
                        // appears to have been dropped — verify.
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost ||
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            // NOTE(review): the tail of this `||` condition
                            // appears to have been dropped too — verify.
                            if(bestRequestCost == 0 ||
                            {
                                break;
                            }
                        }
                    }
                }
            }

            if(pBestRequestBlock != VMA_NULL)
            {
                if(mapped)
                {
                    VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
                    if(res != VK_SUCCESS)
                    {
                        return res;
                    }
                }

                if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
                    currentFrameIndex,
                    m_FrameInUseCount,
                    &bestRequest))
                {
                    // We no longer have an empty Allocation.
                    if(pBestRequestBlock->m_pMetadata->IsEmpty())
                    {
                        m_HasEmptyBlock = false;
                    }
                    // Allocate from this pBlock.
                    *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
                    pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
                    (*pAllocation)->InitBlockAllocation(
                        hCurrentPool,
                        pBestRequestBlock,
                        bestRequest.offset,
                        alignment,
                        size,
                        suballocType,
                        mapped,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
                    VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
                    // NOTE(review): `blockIndex` is not in scope here (the
                    // search loops above have ended); harmless only while
                    // VMA_DEBUG_LOG expands to nothing — verify.
                    VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
                    (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
                    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                    {
                        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
                    }
                    if(IsCorruptionDetectionEnabled())
                    {
                        VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
                        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
                    }
                    return VK_SUCCESS;
                }
                // else: Some allocations must have been touched while we are here. Next try.
            }
            else
            {
                // Could not find place in any of the blocks - break outer loop.
                break;
            }
        }
        /* Maximum number of tries exceeded - a very unlike event when many other
        threads are simultaneously touching allocations making it impossible to make
        lost at the same time as we try to allocate. */
        if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
        {
            return VK_ERROR_TOO_MANY_OBJECTS;
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10597 
void VmaBlockVector::Free(
    VmaAllocation hAllocation)
{
    // Returns a single allocation to its owning block. At most one empty block
    // is kept alive as a cache (subject to m_MinBlockCount); a surplus empty
    // block is destroyed after the mutex is released.
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Scope for lock.
    {
        VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        // Verify the magic values in the debug margins before the
        // suballocation's metadata is discarded.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // Balance the persistent Map() performed when this allocation was created.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        // NOTE(review): memTypeIndex is not declared in this function; this only
        // compiles while VMA_DEBUG_LOG expands to nothing - confirm before
        // enabling debug logging.
        VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);

        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty Allocation. We don't want to have two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // We now have first empty block.
            else
            {
                m_HasEmptyBlock = true;
            }
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
                m_HasEmptyBlock = false;
            }
        }

        // One bubble-sort step to keep m_Blocks ordered by ascending free size.
        IncrementallySortBlocks();
    }

    // Destruction of a free Allocation. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG(" Deleted empty allocation");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
    10665 
    10666 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10667 {
    10668  VkDeviceSize result = 0;
    10669  for(size_t i = m_Blocks.size(); i--; )
    10670  {
    10671  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10672  if(result >= m_PreferredBlockSize)
    10673  {
    10674  break;
    10675  }
    10676  }
    10677  return result;
    10678 }
    10679 
    10680 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10681 {
    10682  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10683  {
    10684  if(m_Blocks[blockIndex] == pBlock)
    10685  {
    10686  VmaVectorRemove(m_Blocks, blockIndex);
    10687  return;
    10688  }
    10689  }
    10690  VMA_ASSERT(0);
    10691 }
    10692 
    10693 void VmaBlockVector::IncrementallySortBlocks()
    10694 {
    10695  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10696  {
    10697  // Bubble sort only until first swap.
    10698  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10699  {
    10700  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10701  {
    10702  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10703  return;
    10704  }
    10705  }
    10706  }
    10707 }
    10708 
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Tries to suballocate `size` bytes from the given block without making
    // other allocations lost (that path is asserted away below). On success
    // fills *pAllocation and returns VK_SUCCESS; if the block has no suitable
    // free space, returns VK_ERROR_OUT_OF_DEVICE_MEMORY.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Bump the block's map reference count first: a failure here leaves the
        // block's metadata untouched.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Debug feature: fill fresh memory with a recognizable pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Debug feature: write magic values into the margins around the new
        // allocation so later corruption can be detected.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10783 
    10784 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10785 {
    10786  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10787  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10788  allocInfo.allocationSize = blockSize;
    10789  VkDeviceMemory mem = VK_NULL_HANDLE;
    10790  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10791  if(res < 0)
    10792  {
    10793  return res;
    10794  }
    10795 
    10796  // New VkDeviceMemory successfully created.
    10797 
    10798  // Create new Allocation for it.
    10799  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10800  pBlock->Init(
    10801  m_hAllocator,
    10802  m_MemoryTypeIndex,
    10803  mem,
    10804  allocInfo.allocationSize,
    10805  m_NextBlockId++,
    10806  m_Algorithm);
    10807 
    10808  m_Blocks.push_back(pBlock);
    10809  if(pNewBlockIndex != VMA_NULL)
    10810  {
    10811  *pNewBlockIndex = m_Blocks.size() - 1;
    10812  }
    10813 
    10814  return VK_SUCCESS;
    10815 }
    10816 
    10817 #if VMA_STATS_STRING_ENABLED
    10818 
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    // Serializes this block vector as a JSON object. Custom pools dump their
    // full configuration; default pools report only the preferred block size.
    // Output order matters - it is part of the stats-string format.
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are emitted only when they actually constrain the pool.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10881 
    10882 #endif // #if VMA_STATS_STRING_ENABLED
    10883 
    10884 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10885  VmaAllocator hAllocator,
    10886  uint32_t currentFrameIndex)
    10887 {
    10888  if(m_pDefragmentator == VMA_NULL)
    10889  {
    10890  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10891  hAllocator,
    10892  this,
    10893  currentFrameIndex);
    10894  }
    10895 
    10896  return m_pDefragmentator;
    10897 }
    10898 
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // Runs the defragmentator for this block vector (if one was created via
    // EnsureDefragmentator), accumulates results into *pDefragmentationStats,
    // reduces the caller's remaining move budget, and destroys blocks that
    // became empty.
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // The budget parameters are in-out: subtract what this block vector
        // used so callers can spread the budget over several vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Respect m_MinBlockCount: at the minimum, keep the empty block
            // and just remember that one exists.
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    10955 
    10956 void VmaBlockVector::DestroyDefragmentator()
    10957 {
    10958  if(m_pDefragmentator != VMA_NULL)
    10959  {
    10960  vma_delete(m_hAllocator, m_pDefragmentator);
    10961  m_pDefragmentator = VMA_NULL;
    10962  }
    10963 }
    10964 
    10965 void VmaBlockVector::MakePoolAllocationsLost(
    10966  uint32_t currentFrameIndex,
    10967  size_t* pLostAllocationCount)
    10968 {
    10969  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10970  size_t lostAllocationCount = 0;
    10971  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10972  {
    10973  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10974  VMA_ASSERT(pBlock);
    10975  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10976  }
    10977  if(pLostAllocationCount != VMA_NULL)
    10978  {
    10979  *pLostAllocationCount = lostAllocationCount;
    10980  }
    10981 }
    10982 
    10983 VkResult VmaBlockVector::CheckCorruption()
    10984 {
    10985  if(!IsCorruptionDetectionEnabled())
    10986  {
    10987  return VK_ERROR_FEATURE_NOT_PRESENT;
    10988  }
    10989 
    10990  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10991  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10992  {
    10993  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10994  VMA_ASSERT(pBlock);
    10995  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    10996  if(res != VK_SUCCESS)
    10997  {
    10998  return res;
    10999  }
    11000  }
    11001  return VK_SUCCESS;
    11002 }
    11003 
    11004 void VmaBlockVector::AddStats(VmaStats* pStats)
    11005 {
    11006  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11007  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11008 
    11009  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11010 
    11011  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11012  {
    11013  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11014  VMA_ASSERT(pBlock);
    11015  VMA_HEAVY_ASSERT(pBlock->Validate());
    11016  VmaStatInfo allocationStatInfo;
    11017  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11018  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11019  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11020  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11021  }
    11022 }
    11023 
    11025 // VmaDefragmentator members definition
    11026 
// Binds the defragmentator to one block vector; counters start at zero and the
// allocation/block lists use the allocator's user-supplied callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Only the default algorithm (0) supports defragmentation.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11041 
    11042 VmaDefragmentator::~VmaDefragmentator()
    11043 {
    11044  for(size_t i = m_Blocks.size(); i--; )
    11045  {
    11046  vma_delete(m_hAllocator, m_Blocks[i]);
    11047  }
    11048 }
    11049 
    11050 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11051 {
    11052  AllocationInfo allocInfo;
    11053  allocInfo.m_hAllocation = hAlloc;
    11054  allocInfo.m_pChanged = pChanged;
    11055  m_Allocations.push_back(allocInfo);
    11056 }
    11057 
    11058 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11059 {
    11060  // It has already been mapped for defragmentation.
    11061  if(m_pMappedDataForDefragmentation)
    11062  {
    11063  *ppMappedData = m_pMappedDataForDefragmentation;
    11064  return VK_SUCCESS;
    11065  }
    11066 
    11067  // It is originally mapped.
    11068  if(m_pBlock->GetMappedData())
    11069  {
    11070  *ppMappedData = m_pBlock->GetMappedData();
    11071  return VK_SUCCESS;
    11072  }
    11073 
    11074  // Map on first usage.
    11075  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11076  *ppMappedData = m_pMappedDataForDefragmentation;
    11077  return res;
    11078 }
    11079 
    11080 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11081 {
    11082  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11083  {
    11084  m_pBlock->Unmap(hAllocator, 1);
    11085  }
    11086 }
    11087 
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // One defragmentation pass: walks allocations from the most "source" block
    // (back of m_Blocks) and tries to move each one into the most "destination"
    // block (front) where it fits and the move helps. Returns VK_SUCCESS when
    // the pass ran to completion, VK_INCOMPLETE when a move budget was reached.
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX means "restart at the last allocation of the current block".
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            // NOTE(review): other call sites of CreateAllocationRequest pass a
            // strategy argument before the out-parameter - confirm this call
            // matches the declaration (a line may have been lost here).
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both blocks must be mapped on the host before memcpy.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Re-create the magic-value margins at the destination.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit: register at destination, free at source, repoint the
                // allocation handle.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11231 
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Top-level driver: builds per-block info, distributes the registered
    // allocations into their blocks, sorts for the move heuristic, runs up to
    // two DefragmentRound passes, then unmaps anything mapped on the way.
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value. Enables the binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11299 
    11300 bool VmaDefragmentator::MoveMakesSense(
    11301  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11302  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11303 {
    11304  if(dstBlockIndex < srcBlockIndex)
    11305  {
    11306  return true;
    11307  }
    11308  if(dstBlockIndex > srcBlockIndex)
    11309  {
    11310  return false;
    11311  }
    11312  if(dstOffset < srcOffset)
    11313  {
    11314  return true;
    11315  }
    11316  return false;
    11317 }
    11318 
    11320 // VmaRecorder
    11321 
    11322 #if VMA_RECORDING_ENABLED
    11323 
// Default-constructs an inactive recorder; the file handle and the performance
// counter fields are overwritten by Init() before any Record* call is made.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11332 
    11333 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    11334 {
    11335  m_UseMutex = useMutex;
    11336  m_Flags = settings.flags;
    11337 
    11338  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    11339  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    11340 
    11341  // Open file for writing.
    11342  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    11343  if(err != 0)
    11344  {
    11345  return VK_ERROR_INITIALIZATION_FAILED;
    11346  }
    11347 
    11348  // Write header.
    11349  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    11350  fprintf(m_File, "%s\n", "1,3");
    11351 
    11352  return VK_SUCCESS;
    11353 }
    11354 
    11355 VmaRecorder::~VmaRecorder()
    11356 {
    11357  if(m_File != VMA_NULL)
    11358  {
    11359  fclose(m_File);
    11360  }
    11361 }
    11362 
    11363 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11364 {
    11365  CallParams callParams;
    11366  GetBasicParams(callParams);
    11367 
    11368  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11369  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11370  Flush();
    11371 }
    11372 
    11373 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11374 {
    11375  CallParams callParams;
    11376  GetBasicParams(callParams);
    11377 
    11378  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11379  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11380  Flush();
    11381 }
    11382 
    11383 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    11384 {
    11385  CallParams callParams;
    11386  GetBasicParams(callParams);
    11387 
    11388  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11389  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    11390  createInfo.memoryTypeIndex,
    11391  createInfo.flags,
    11392  createInfo.blockSize,
    11393  (uint64_t)createInfo.minBlockCount,
    11394  (uint64_t)createInfo.maxBlockCount,
    11395  createInfo.frameInUseCount,
    11396  pool);
    11397  Flush();
    11398 }
    11399 
    11400 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11401 {
    11402  CallParams callParams;
    11403  GetBasicParams(callParams);
    11404 
    11405  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11406  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11407  pool);
    11408  Flush();
    11409 }
    11410 
    11411 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    11412  const VkMemoryRequirements& vkMemReq,
    11413  const VmaAllocationCreateInfo& createInfo,
    11414  VmaAllocation allocation)
    11415 {
    11416  CallParams callParams;
    11417  GetBasicParams(callParams);
    11418 
    11419  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11420  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11421  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11422  vkMemReq.size,
    11423  vkMemReq.alignment,
    11424  vkMemReq.memoryTypeBits,
    11425  createInfo.flags,
    11426  createInfo.usage,
    11427  createInfo.requiredFlags,
    11428  createInfo.preferredFlags,
    11429  createInfo.memoryTypeBits,
    11430  createInfo.pool,
    11431  allocation,
    11432  userDataStr.GetString());
    11433  Flush();
    11434 }
    11435 
    11436 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11437  const VkMemoryRequirements& vkMemReq,
    11438  bool requiresDedicatedAllocation,
    11439  bool prefersDedicatedAllocation,
    11440  const VmaAllocationCreateInfo& createInfo,
    11441  VmaAllocation allocation)
    11442 {
    11443  CallParams callParams;
    11444  GetBasicParams(callParams);
    11445 
    11446  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11447  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11448  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11449  vkMemReq.size,
    11450  vkMemReq.alignment,
    11451  vkMemReq.memoryTypeBits,
    11452  requiresDedicatedAllocation ? 1 : 0,
    11453  prefersDedicatedAllocation ? 1 : 0,
    11454  createInfo.flags,
    11455  createInfo.usage,
    11456  createInfo.requiredFlags,
    11457  createInfo.preferredFlags,
    11458  createInfo.memoryTypeBits,
    11459  createInfo.pool,
    11460  allocation,
    11461  userDataStr.GetString());
    11462  Flush();
    11463 }
    11464 
// Appends a "vmaAllocateMemoryForImage" entry to the recording file as one CSV line.
// Same layout as RecordAllocateMemoryForBuffer: memory requirements,
// dedicated-allocation hints, VmaAllocationCreateInfo fields, allocation handle.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    // Thread id and timestamp are captured before taking the file lock.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    // Specifier/argument order must match exactly: 2x %llu, 8x %u, 2x %p, %s.
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11493 
    11494 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11495  VmaAllocation allocation)
    11496 {
    11497  CallParams callParams;
    11498  GetBasicParams(callParams);
    11499 
    11500  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11501  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11502  allocation);
    11503  Flush();
    11504 }
    11505 
// Appends a "vmaSetAllocationUserData" entry: allocation handle plus the new
// user data rendered as a string.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    // Thread id and timestamp are captured before taking the file lock.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // The allocation remembers whether its user data is a string; recreate the
    // equivalent creation flag so UserDataString formats pUserData the same way.
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11522 
    11523 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11524  VmaAllocation allocation)
    11525 {
    11526  CallParams callParams;
    11527  GetBasicParams(callParams);
    11528 
    11529  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11530  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11531  allocation);
    11532  Flush();
    11533 }
    11534 
    11535 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11536  VmaAllocation allocation)
    11537 {
    11538  CallParams callParams;
    11539  GetBasicParams(callParams);
    11540 
    11541  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11542  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11543  allocation);
    11544  Flush();
    11545 }
    11546 
    11547 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11548  VmaAllocation allocation)
    11549 {
    11550  CallParams callParams;
    11551  GetBasicParams(callParams);
    11552 
    11553  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11554  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11555  allocation);
    11556  Flush();
    11557 }
    11558 
    11559 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11560  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11561 {
    11562  CallParams callParams;
    11563  GetBasicParams(callParams);
    11564 
    11565  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11566  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11567  allocation,
    11568  offset,
    11569  size);
    11570  Flush();
    11571 }
    11572 
    11573 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11574  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11575 {
    11576  CallParams callParams;
    11577  GetBasicParams(callParams);
    11578 
    11579  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11580  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11581  allocation,
    11582  offset,
    11583  size);
    11584  Flush();
    11585 }
    11586 
// Appends a "vmaCreateBuffer" entry as one CSV line: the VkBufferCreateInfo
// fields, the VmaAllocationCreateInfo fields, and the resulting allocation.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    // Thread id and timestamp are captured before taking the file lock.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    // Specifier/argument order must match exactly: %u, %llu (size), 7x %u,
    // 2x %p (pool, allocation), %s (user data).
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11612 
// Appends a "vmaCreateImage" entry as one CSV line: the VkImageCreateInfo
// fields, the VmaAllocationCreateInfo fields, and the resulting allocation.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    // Thread id and timestamp are captured before taking the file lock.
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    // Specifier/argument order must match exactly: 18x %u (13 image-create
    // fields + 5 alloc-create fields), 2x %p (pool, allocation), %s.
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11647 
    11648 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11649  VmaAllocation allocation)
    11650 {
    11651  CallParams callParams;
    11652  GetBasicParams(callParams);
    11653 
    11654  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11655  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11656  allocation);
    11657  Flush();
    11658 }
    11659 
    11660 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11661  VmaAllocation allocation)
    11662 {
    11663  CallParams callParams;
    11664  GetBasicParams(callParams);
    11665 
    11666  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11667  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11668  allocation);
    11669  Flush();
    11670 }
    11671 
    11672 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11673  VmaAllocation allocation)
    11674 {
    11675  CallParams callParams;
    11676  GetBasicParams(callParams);
    11677 
    11678  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11679  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11680  allocation);
    11681  Flush();
    11682 }
    11683 
    11684 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11685  VmaAllocation allocation)
    11686 {
    11687  CallParams callParams;
    11688  GetBasicParams(callParams);
    11689 
    11690  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11691  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11692  allocation);
    11693  Flush();
    11694 }
    11695 
    11696 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11697  VmaPool pool)
    11698 {
    11699  CallParams callParams;
    11700  GetBasicParams(callParams);
    11701 
    11702  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11703  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11704  pool);
    11705  Flush();
    11706 }
    11707 
    11708 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11709 {
    11710  if(pUserData != VMA_NULL)
    11711  {
    11712  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11713  {
    11714  m_Str = (const char*)pUserData;
    11715  }
    11716  else
    11717  {
    11718  sprintf_s(m_PtrStr, "%p", pUserData);
    11719  m_Str = m_PtrStr;
    11720  }
    11721  }
    11722  else
    11723  {
    11724  m_Str = "";
    11725  }
    11726 }
    11727 
// Writes the "Config,Begin" ... "Config,End" section at the top of the
// recording file: physical-device properties and limits, the full memory
// heap/type layout, enabled extensions, and the VMA_* compile-time macros.
// Player tools parse these exact lines, so the format must stay stable.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Dump every memory heap and memory type so a replay can validate that it
    // runs on a compatible device.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build; affects how the recording
    // should be interpreted/replayed.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11773 
    11774 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11775 {
    11776  outParams.threadId = GetCurrentThreadId();
    11777 
    11778  LARGE_INTEGER counter;
    11779  QueryPerformanceCounter(&counter);
    11780  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11781 }
    11782 
    11783 void VmaRecorder::Flush()
    11784 {
    11785  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11786  {
    11787  fflush(m_File);
    11788  }
    11789 }
    11790 
    11791 #endif // #if VMA_RECORDING_ENABLED
    11792 
    11794 // VmaAllocator_T
    11795 
// Allocator constructor: caches the create-info settings, imports Vulkan
// function pointers, queries device/memory properties, applies optional
// per-heap size limits, and creates one default VmaBlockVector plus one
// dedicated-allocations vector per memory type.
VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    m_hDevice(pCreateInfo->device),
    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    m_PreferredLargeHeapBlockSize(0),
    m_PhysicalDevice(pCreateInfo->physicalDevice),
    m_CurrentFrameIndex(0),
    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    m_NextPoolId(0)
    // NOTE(review): the matching `#if VMA_RECORDING_ENABLED` line is not
    // visible in this revision — verify it precedes this initializer.
    ,m_pRecorder(VMA_NULL)
#endif
{
    if(VMA_DEBUG_DETECT_CORRUPTION)
    {
        // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
        VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    }

    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);

#if !(VMA_DEDICATED_ALLOCATION)
    // NOTE(review): the condition guarding this assert (presumably
    // `if(m_UseKhrDedicatedAllocation)`) is not visible here — verify.
    {
        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    }
#endif

    // Zero all POD members before filling them below.
    memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    memset(&m_MemProps, 0, sizeof(m_MemProps));

    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));

    // VK_WHOLE_SIZE means "no limit" for a heap.
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    {
        m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    }

    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    {
        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    }

    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);

    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);

    // Alignment/granularity values must be powers of two for the bit tricks
    // used throughout the allocator.
    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));

    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    // Optional per-heap size limits: also clamp the reported heap sizes so
    // block-size heuristics respect the limit.
    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    {
        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
        {
            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
            if(limit != VK_WHOLE_SIZE)
            {
                m_HeapSizeLimit[heapIndex] = limit;
                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
                {
                    m_MemProps.memoryHeaps[heapIndex].size = limit;
                }
            }
        }
    }

    // One default block vector + one dedicated-allocation list per memory type.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);

        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
            this,
            memTypeIndex,
            preferredBlockSize,
            0,
            SIZE_MAX,
            GetBufferImageGranularity(),
            pCreateInfo->frameInUseCount,
            false, // isCustomPool
            false, // explicitBlockSize
            false); // linearAlgorithm
        // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
        // because minBlockCount is 0.
        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));

    }
}
    11895 
// Second-phase initialization, separate from the constructor so it can return
// a VkResult. Currently only sets up the optional call recorder when
// VmaAllocatorCreateInfo::pRecordSettings names a file.
// Returns VK_SUCCESS, the recorder's failure code, or
// VK_ERROR_FEATURE_NOT_PRESENT when recording was requested but compiled out.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            // NOTE(review): m_pRecorder stays non-null on failure, so the
            // destructor will still invoke it — confirm VmaRecorder tolerates
            // use after a failed Init.
            return res;
        }
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
    11923 
    11924 VmaAllocator_T::~VmaAllocator_T()
    11925 {
    11926 #if VMA_RECORDING_ENABLED
    11927  if(m_pRecorder != VMA_NULL)
    11928  {
    11929  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    11930  vma_delete(this, m_pRecorder);
    11931  }
    11932 #endif
    11933 
    11934  VMA_ASSERT(m_Pools.empty());
    11935 
    11936  for(size_t i = GetMemoryTypeCount(); i--; )
    11937  {
    11938  vma_delete(this, m_pDedicatedAllocations[i]);
    11939  vma_delete(this, m_pBlockVectors[i]);
    11940  }
    11941 }
    11942 
// Resolves the Vulkan entry points the allocator calls. Resolution order:
// 1. Statically linked functions (when VMA_STATIC_VULKAN_FUNCTIONS == 1),
//    including vkGetDeviceProcAddr lookup of the KHR dedicated-allocation
//    pair when that extension is in use.
// 2. Any non-null pointers the user supplied via pVulkanFunctions override
//    the static ones.
// Finally asserts that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions are not exported statically; fetch them from the device.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies a user-supplied pointer over the default only when it is non-null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12028 
    12029 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12030 {
    12031  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12032  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12033  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12034  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12035 }
    12036 
    12037 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12038  VkDeviceSize size,
    12039  VkDeviceSize alignment,
    12040  bool dedicatedAllocation,
    12041  VkBuffer dedicatedBuffer,
    12042  VkImage dedicatedImage,
    12043  const VmaAllocationCreateInfo& createInfo,
    12044  uint32_t memTypeIndex,
    12045  VmaSuballocationType suballocType,
    12046  VmaAllocation* pAllocation)
    12047 {
    12048  VMA_ASSERT(pAllocation != VMA_NULL);
    12049  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12050 
    12051  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12052 
    12053  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12054  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12055  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12056  {
    12057  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12058  }
    12059 
    12060  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12061  VMA_ASSERT(blockVector);
    12062 
    12063  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12064  bool preferDedicatedMemory =
    12065  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12066  dedicatedAllocation ||
    12067  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12068  size > preferredBlockSize / 2;
    12069 
    12070  if(preferDedicatedMemory &&
    12071  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12072  finalCreateInfo.pool == VK_NULL_HANDLE)
    12073  {
    12075  }
    12076 
    12077  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12078  {
    12079  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12080  {
    12081  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12082  }
    12083  else
    12084  {
    12085  return AllocateDedicatedMemory(
    12086  size,
    12087  suballocType,
    12088  memTypeIndex,
    12089  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12090  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12091  finalCreateInfo.pUserData,
    12092  dedicatedBuffer,
    12093  dedicatedImage,
    12094  pAllocation);
    12095  }
    12096  }
    12097  else
    12098  {
    12099  VkResult res = blockVector->Allocate(
    12100  VK_NULL_HANDLE, // hCurrentPool
    12101  m_CurrentFrameIndex.load(),
    12102  size,
    12103  alignment,
    12104  finalCreateInfo,
    12105  suballocType,
    12106  pAllocation);
    12107  if(res == VK_SUCCESS)
    12108  {
    12109  return res;
    12110  }
    12111 
    12112  // 5. Try dedicated memory.
    12113  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12114  {
    12115  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12116  }
    12117  else
    12118  {
    12119  res = AllocateDedicatedMemory(
    12120  size,
    12121  suballocType,
    12122  memTypeIndex,
    12123  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12124  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12125  finalCreateInfo.pUserData,
    12126  dedicatedBuffer,
    12127  dedicatedImage,
    12128  pAllocation);
    12129  if(res == VK_SUCCESS)
    12130  {
    12131  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12132  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12133  return VK_SUCCESS;
    12134  }
    12135  else
    12136  {
    12137  // Everything failed: Return error code.
    12138  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12139  return res;
    12140  }
    12141  }
    12142  }
    12143 }
    12144 
// Allocates a whole VkDeviceMemory block dedicated to a single allocation.
// Optionally chains VkMemoryDedicatedAllocateInfoKHR for the given buffer or
// image (KHR dedicated allocation), optionally maps the memory persistently,
// then registers the allocation in m_pDedicatedAllocations[memTypeIndex].
// On any failure the partially acquired resources are released before
// returning the error code.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain the dedicated-allocation info for exactly one of buffer/image.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistent mapping requested: map now and keep the pointer in the allocation.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            // Release the just-allocated memory before reporting the error.
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        // Kept sorted by pointer so FreeDedicatedMemory can binary-search.
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12227 
    12228 void VmaAllocator_T::GetBufferMemoryRequirements(
    12229  VkBuffer hBuffer,
    12230  VkMemoryRequirements& memReq,
    12231  bool& requiresDedicatedAllocation,
    12232  bool& prefersDedicatedAllocation) const
    12233 {
    12234 #if VMA_DEDICATED_ALLOCATION
    12235  if(m_UseKhrDedicatedAllocation)
    12236  {
    12237  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12238  memReqInfo.buffer = hBuffer;
    12239 
    12240  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12241 
    12242  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12243  memReq2.pNext = &memDedicatedReq;
    12244 
    12245  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12246 
    12247  memReq = memReq2.memoryRequirements;
    12248  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12249  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12250  }
    12251  else
    12252 #endif // #if VMA_DEDICATED_ALLOCATION
    12253  {
    12254  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12255  requiresDedicatedAllocation = false;
    12256  prefersDedicatedAllocation = false;
    12257  }
    12258 }
    12259 
    12260 void VmaAllocator_T::GetImageMemoryRequirements(
    12261  VkImage hImage,
    12262  VkMemoryRequirements& memReq,
    12263  bool& requiresDedicatedAllocation,
    12264  bool& prefersDedicatedAllocation) const
    12265 {
    12266 #if VMA_DEDICATED_ALLOCATION
    12267  if(m_UseKhrDedicatedAllocation)
    12268  {
    12269  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12270  memReqInfo.image = hImage;
    12271 
    12272  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12273 
    12274  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12275  memReq2.pNext = &memDedicatedReq;
    12276 
    12277  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12278 
    12279  memReq = memReq2.memoryRequirements;
    12280  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12281  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12282  }
    12283  else
    12284 #endif // #if VMA_DEDICATED_ALLOCATION
    12285  {
    12286  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12287  requiresDedicatedAllocation = false;
    12288  prefersDedicatedAllocation = false;
    12289  }
    12290 }
    12291 
    12292 VkResult VmaAllocator_T::AllocateMemory(
    12293  const VkMemoryRequirements& vkMemReq,
    12294  bool requiresDedicatedAllocation,
    12295  bool prefersDedicatedAllocation,
    12296  VkBuffer dedicatedBuffer,
    12297  VkImage dedicatedImage,
    12298  const VmaAllocationCreateInfo& createInfo,
    12299  VmaSuballocationType suballocType,
    12300  VmaAllocation* pAllocation)
    12301 {
    12302  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12303 
    12304  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12305  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12306  {
    12307  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12308  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12309  }
    12310  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12312  {
    12313  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12314  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12315  }
    12316  if(requiresDedicatedAllocation)
    12317  {
    12318  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12319  {
    12320  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12321  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12322  }
    12323  if(createInfo.pool != VK_NULL_HANDLE)
    12324  {
    12325  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12326  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12327  }
    12328  }
    12329  if((createInfo.pool != VK_NULL_HANDLE) &&
    12330  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12331  {
    12332  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12333  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12334  }
    12335 
    12336  if(createInfo.pool != VK_NULL_HANDLE)
    12337  {
    12338  const VkDeviceSize alignmentForPool = VMA_MAX(
    12339  vkMemReq.alignment,
    12340  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12341  return createInfo.pool->m_BlockVector.Allocate(
    12342  createInfo.pool,
    12343  m_CurrentFrameIndex.load(),
    12344  vkMemReq.size,
    12345  alignmentForPool,
    12346  createInfo,
    12347  suballocType,
    12348  pAllocation);
    12349  }
    12350  else
    12351  {
    12352  // Bit mask of memory Vulkan types acceptable for this allocation.
    12353  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12354  uint32_t memTypeIndex = UINT32_MAX;
    12355  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12356  if(res == VK_SUCCESS)
    12357  {
    12358  VkDeviceSize alignmentForMemType = VMA_MAX(
    12359  vkMemReq.alignment,
    12360  GetMemoryTypeMinAlignment(memTypeIndex));
    12361 
    12362  res = AllocateMemoryOfType(
    12363  vkMemReq.size,
    12364  alignmentForMemType,
    12365  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12366  dedicatedBuffer,
    12367  dedicatedImage,
    12368  createInfo,
    12369  memTypeIndex,
    12370  suballocType,
    12371  pAllocation);
    12372  // Succeeded on first try.
    12373  if(res == VK_SUCCESS)
    12374  {
    12375  return res;
    12376  }
    12377  // Allocation from this memory type failed. Try other compatible memory types.
    12378  else
    12379  {
    12380  for(;;)
    12381  {
    12382  // Remove old memTypeIndex from list of possibilities.
    12383  memoryTypeBits &= ~(1u << memTypeIndex);
    12384  // Find alternative memTypeIndex.
    12385  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12386  if(res == VK_SUCCESS)
    12387  {
    12388  alignmentForMemType = VMA_MAX(
    12389  vkMemReq.alignment,
    12390  GetMemoryTypeMinAlignment(memTypeIndex));
    12391 
    12392  res = AllocateMemoryOfType(
    12393  vkMemReq.size,
    12394  alignmentForMemType,
    12395  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12396  dedicatedBuffer,
    12397  dedicatedImage,
    12398  createInfo,
    12399  memTypeIndex,
    12400  suballocType,
    12401  pAllocation);
    12402  // Allocation from this alternative memory type succeeded.
    12403  if(res == VK_SUCCESS)
    12404  {
    12405  return res;
    12406  }
    12407  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12408  }
    12409  // No other matching memory type index could be found.
    12410  else
    12411  {
    12412  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12413  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12414  }
    12415  }
    12416  }
    12417  }
    12418  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12419  else
    12420  return res;
    12421  }
    12422 }
    12423 
    12424 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12425 {
    12426  VMA_ASSERT(allocation);
    12427 
    12428  if(TouchAllocation(allocation))
    12429  {
    12430  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12431  {
    12432  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12433  }
    12434 
    12435  switch(allocation->GetType())
    12436  {
    12437  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12438  {
    12439  VmaBlockVector* pBlockVector = VMA_NULL;
    12440  VmaPool hPool = allocation->GetPool();
    12441  if(hPool != VK_NULL_HANDLE)
    12442  {
    12443  pBlockVector = &hPool->m_BlockVector;
    12444  }
    12445  else
    12446  {
    12447  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12448  pBlockVector = m_pBlockVectors[memTypeIndex];
    12449  }
    12450  pBlockVector->Free(allocation);
    12451  }
    12452  break;
    12453  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12454  FreeDedicatedMemory(allocation);
    12455  break;
    12456  default:
    12457  VMA_ASSERT(0);
    12458  }
    12459  }
    12460 
    12461  allocation->SetUserData(this, VMA_NULL);
    12462  vma_delete(this, allocation);
    12463 }
    12464 
    12465 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12466 {
    12467  // Initialize.
    12468  InitStatInfo(pStats->total);
    12469  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12470  InitStatInfo(pStats->memoryType[i]);
    12471  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12472  InitStatInfo(pStats->memoryHeap[i]);
    12473 
    12474  // Process default pools.
    12475  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12476  {
    12477  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12478  VMA_ASSERT(pBlockVector);
    12479  pBlockVector->AddStats(pStats);
    12480  }
    12481 
    12482  // Process custom pools.
    12483  {
    12484  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12485  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12486  {
    12487  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12488  }
    12489  }
    12490 
    12491  // Process dedicated allocations.
    12492  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12493  {
    12494  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12495  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12496  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12497  VMA_ASSERT(pDedicatedAllocVector);
    12498  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12499  {
    12500  VmaStatInfo allocationStatInfo;
    12501  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12502  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12503  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12504  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12505  }
    12506  }
    12507 
    12508  // Postprocess.
    12509  VmaPostprocessCalcStatInfo(pStats->total);
    12510  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12511  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12512  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12513  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12514 }
    12515 
// 4098 == 0x1002, the PCI-SIG vendor ID of AMD.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12517 
    12518 VkResult VmaAllocator_T::Defragment(
    12519  VmaAllocation* pAllocations,
    12520  size_t allocationCount,
    12521  VkBool32* pAllocationsChanged,
    12522  const VmaDefragmentationInfo* pDefragmentationInfo,
    12523  VmaDefragmentationStats* pDefragmentationStats)
    12524 {
    12525  if(pAllocationsChanged != VMA_NULL)
    12526  {
    12527  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    12528  }
    12529  if(pDefragmentationStats != VMA_NULL)
    12530  {
    12531  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12532  }
    12533 
    12534  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12535 
    12536  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12537 
    12538  const size_t poolCount = m_Pools.size();
    12539 
    12540  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12541  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12542  {
    12543  VmaAllocation hAlloc = pAllocations[allocIndex];
    12544  VMA_ASSERT(hAlloc);
    12545  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12546  // DedicatedAlloc cannot be defragmented.
    12547  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12548  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12549  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12550  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12551  // Lost allocation cannot be defragmented.
    12552  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12553  {
    12554  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12555 
    12556  const VmaPool hAllocPool = hAlloc->GetPool();
    12557  // This allocation belongs to custom pool.
    12558  if(hAllocPool != VK_NULL_HANDLE)
    12559  {
    12560  // Pools with linear or buddy algorithm are not defragmented.
    12561  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12562  {
    12563  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12564  }
    12565  }
    12566  // This allocation belongs to general pool.
    12567  else
    12568  {
    12569  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12570  }
    12571 
    12572  if(pAllocBlockVector != VMA_NULL)
    12573  {
    12574  VmaDefragmentator* const pDefragmentator =
    12575  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12576  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12577  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12578  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12579  }
    12580  }
    12581  }
    12582 
    12583  VkResult result = VK_SUCCESS;
    12584 
    12585  // ======== Main processing.
    12586 
    12587  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12588  uint32_t maxAllocationsToMove = UINT32_MAX;
    12589  if(pDefragmentationInfo != VMA_NULL)
    12590  {
    12591  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12592  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12593  }
    12594 
    12595  // Process standard memory.
    12596  for(uint32_t memTypeIndex = 0;
    12597  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12598  ++memTypeIndex)
    12599  {
    12600  // Only HOST_VISIBLE memory types can be defragmented.
    12601  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12602  {
    12603  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12604  pDefragmentationStats,
    12605  maxBytesToMove,
    12606  maxAllocationsToMove);
    12607  }
    12608  }
    12609 
    12610  // Process custom pools.
    12611  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12612  {
    12613  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12614  pDefragmentationStats,
    12615  maxBytesToMove,
    12616  maxAllocationsToMove);
    12617  }
    12618 
    12619  // ======== Destroy defragmentators.
    12620 
    12621  // Process custom pools.
    12622  for(size_t poolIndex = poolCount; poolIndex--; )
    12623  {
    12624  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12625  }
    12626 
    12627  // Process standard memory.
    12628  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12629  {
    12630  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12631  {
    12632  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12633  }
    12634  }
    12635 
    12636  return result;
    12637 }
    12638 
// Fills *pAllocationInfo with the current properties of hAllocation.
// For allocations that can become lost, this also "touches" the allocation:
// the last-use frame index is advanced to the current frame via a CAS loop,
// so merely querying an allocation keeps it alive for the current frame.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: no backing memory. Report only size and
                // user data; memoryType is UINT32_MAX as a sentinel.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched in the current frame: report full info.
                // pMappedData is VMA_NULL here - lostable allocations are
                // reported as unmapped on this path.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame. On CAS failure
                // (another thread changed it concurrently - presumably
                // localLastUseFrameIndex is updated to the fresh value, per
                // compare-exchange convention; TODO confirm) the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats-enabled builds also bump the last-use frame index on
        // non-lostable allocations so usage is reflected in statistics.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Non-lostable allocation: report everything, including the mapped
        // pointer if the allocation is persistently mapped.
        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12710 
// "Touches" hAllocation: advances its last-use frame index to the current
// frame via a CAS loop. Returns false if the allocation is lost, true
// otherwise. Used e.g. by FreeMemory to decide whether backing memory exists.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation has been lost - cannot be touched.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched in the current frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use to the current frame; on CAS failure
                // another thread raced us and the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats-enabled builds also bump the last-use frame index on
        // non-lostable allocations so usage is reflected in statistics.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Non-lostable allocations are always "alive".
        return true;
    }
}
    12762 
    12763 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    12764 {
    12765  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    12766 
    12767  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    12768 
    12769  if(newCreateInfo.maxBlockCount == 0)
    12770  {
    12771  newCreateInfo.maxBlockCount = SIZE_MAX;
    12772  }
    12773  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    12774  {
    12775  return VK_ERROR_INITIALIZATION_FAILED;
    12776  }
    12777 
    12778  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    12779 
    12780  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    12781 
    12782  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    12783  if(res != VK_SUCCESS)
    12784  {
    12785  vma_delete(this, *pPool);
    12786  *pPool = VMA_NULL;
    12787  return res;
    12788  }
    12789 
    12790  // Add to m_Pools.
    12791  {
    12792  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12793  (*pPool)->SetId(m_NextPoolId++);
    12794  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    12795  }
    12796 
    12797  return VK_SUCCESS;
    12798 }
    12799 
// Unregisters `pool` from m_Pools (under the pools mutex) and destroys it.
void VmaAllocator_T::DestroyPool(VmaPool pool)
{
    // Remove from m_Pools.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
        VMA_ASSERT(success && "Pool not found in Allocator.");
    }

    // Destruction happens outside the lock.
    vma_delete(this, pool);
}
    12811 
// Retrieves statistics of a single custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    12816 
// Atomically publishes the new frame index; it is read by the
// lost-allocation bookkeeping (GetAllocationInfo / TouchAllocation).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    12821 
// Marks eligible allocations in hPool as lost, using the current frame index.
// Optionally reports how many allocations were lost via pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    12830 
// Runs corruption detection on a single custom pool's block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    12835 
    12836 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12837 {
    12838  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12839 
    12840  // Process default pools.
    12841  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12842  {
    12843  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12844  {
    12845  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12846  VMA_ASSERT(pBlockVector);
    12847  VkResult localRes = pBlockVector->CheckCorruption();
    12848  switch(localRes)
    12849  {
    12850  case VK_ERROR_FEATURE_NOT_PRESENT:
    12851  break;
    12852  case VK_SUCCESS:
    12853  finalRes = VK_SUCCESS;
    12854  break;
    12855  default:
    12856  return localRes;
    12857  }
    12858  }
    12859  }
    12860 
    12861  // Process custom pools.
    12862  {
    12863  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12864  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12865  {
    12866  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12867  {
    12868  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12869  switch(localRes)
    12870  {
    12871  case VK_ERROR_FEATURE_NOT_PRESENT:
    12872  break;
    12873  case VK_SUCCESS:
    12874  finalRes = VK_SUCCESS;
    12875  break;
    12876  default:
    12877  return localRes;
    12878  }
    12879  }
    12880  }
    12881  }
    12882 
    12883  return finalRes;
    12884 }
    12885 
// Creates a dummy allocation that is permanently in the "lost" state
// (frame index VMA_FRAME_INDEX_LOST, no user-data string, no backing memory).
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    12891 
    12892 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    12893 {
    12894  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    12895 
    12896  VkResult res;
    12897  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12898  {
    12899  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12900  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    12901  {
    12902  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12903  if(res == VK_SUCCESS)
    12904  {
    12905  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    12906  }
    12907  }
    12908  else
    12909  {
    12910  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12911  }
    12912  }
    12913  else
    12914  {
    12915  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12916  }
    12917 
    12918  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    12919  {
    12920  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    12921  }
    12922 
    12923  return res;
    12924 }
    12925 
// Wraps vkFreeMemory: invokes the user's pfnFree callback first, frees the
// memory, then returns the freed bytes to the heap budget if that heap has
// a user-imposed size limit.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // User callback is notified before the memory actually goes away.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit" - nothing to restore.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    12942 
    12943 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    12944 {
    12945  if(hAllocation->CanBecomeLost())
    12946  {
    12947  return VK_ERROR_MEMORY_MAP_FAILED;
    12948  }
    12949 
    12950  switch(hAllocation->GetType())
    12951  {
    12952  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12953  {
    12954  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12955  char *pBytes = VMA_NULL;
    12956  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    12957  if(res == VK_SUCCESS)
    12958  {
    12959  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    12960  hAllocation->BlockAllocMap();
    12961  }
    12962  return res;
    12963  }
    12964  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12965  return hAllocation->DedicatedAllocMap(this, ppData);
    12966  default:
    12967  VMA_ASSERT(0);
    12968  return VK_ERROR_MEMORY_MAP_FAILED;
    12969  }
    12970 }
    12971 
    12972 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    12973 {
    12974  switch(hAllocation->GetType())
    12975  {
    12976  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12977  {
    12978  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12979  hAllocation->BlockAllocUnmap();
    12980  pBlock->Unmap(this, 1);
    12981  }
    12982  break;
    12983  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12984  hAllocation->DedicatedAllocUnmap(this);
    12985  break;
    12986  default:
    12987  VMA_ASSERT(0);
    12988  }
    12989 }
    12990 
    12991 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    12992 {
    12993  VkResult res = VK_SUCCESS;
    12994  switch(hAllocation->GetType())
    12995  {
    12996  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12997  res = GetVulkanFunctions().vkBindBufferMemory(
    12998  m_hDevice,
    12999  hBuffer,
    13000  hAllocation->GetMemory(),
    13001  0); //memoryOffset
    13002  break;
    13003  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13004  {
    13005  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13006  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13007  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13008  break;
    13009  }
    13010  default:
    13011  VMA_ASSERT(0);
    13012  }
    13013  return res;
    13014 }
    13015 
    13016 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13017 {
    13018  VkResult res = VK_SUCCESS;
    13019  switch(hAllocation->GetType())
    13020  {
    13021  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13022  res = GetVulkanFunctions().vkBindImageMemory(
    13023  m_hDevice,
    13024  hImage,
    13025  hAllocation->GetMemory(),
    13026  0); //memoryOffset
    13027  break;
    13028  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13029  {
    13030  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13031  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13032  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13033  break;
    13034  }
    13035  default:
    13036  VMA_ASSERT(0);
    13037  }
    13038  return res;
    13039 }
    13040 
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    // Flushes or invalidates the host-visible mapping of hAllocation.
    // offset/size are relative to the allocation; size may be VK_WHOLE_SIZE.
    // Only non-coherent memory types need an explicit flush/invalidate, so the
    // call is a no-op for coherent memory or size == 0.
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        // VkMappedMemoryRange offset/size must be aligned to nonCoherentAtomSize
        // (except where the range reaches the end of the memory object).
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocation starts at offset 0 of its own VkDeviceMemory,
            // so allocation-relative coordinates are already memory-relative.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Grow to an atom boundary, but never past the end of the
                // memory object.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Translate the range from allocation-relative to block-relative
            // and clamp the aligned size so it doesn't run past the block end.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13116 
    13117 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13118 {
    13119  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13120 
    13121  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13122  {
    13123  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13124  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13125  VMA_ASSERT(pDedicatedAllocations);
    13126  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13127  VMA_ASSERT(success);
    13128  }
    13129 
    13130  VkDeviceMemory hMemory = allocation->GetMemory();
    13131 
    13132  /*
    13133  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13134  before vkFreeMemory.
    13135 
    13136  if(allocation->GetMappedData() != VMA_NULL)
    13137  {
    13138  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13139  }
    13140  */
    13141 
    13142  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13143 
    13144  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13145 }
    13146 
    13147 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13148 {
    13149  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13150  !hAllocation->CanBecomeLost() &&
    13151  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13152  {
    13153  void* pData = VMA_NULL;
    13154  VkResult res = Map(hAllocation, &pData);
    13155  if(res == VK_SUCCESS)
    13156  {
    13157  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13158  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13159  Unmap(hAllocation);
    13160  }
    13161  else
    13162  {
    13163  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13164  }
    13165  }
    13166 }
    13167 
    13168 #if VMA_STATS_STRING_ENABLED
    13169 
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Writes the detailed allocation map as three optional JSON sections:
    // "DedicatedAllocations", "DefaultPools" and "Pools". Each section header
    // is emitted lazily, only once there is at least one non-empty entry.

    // Dedicated allocations, grouped per memory type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key is "Type <memTypeIndex>"; value is an array of allocations.
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default (per-memory-type) block vectors.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Each pool is keyed by its numeric ID.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13255 
    13256 #endif // #if VMA_STATS_STRING_ENABLED
    13257 
    13259 // Public interface
    13260 
    13261 VkResult vmaCreateAllocator(
    13262  const VmaAllocatorCreateInfo* pCreateInfo,
    13263  VmaAllocator* pAllocator)
    13264 {
    13265  VMA_ASSERT(pCreateInfo && pAllocator);
    13266  VMA_DEBUG_LOG("vmaCreateAllocator");
    13267  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13268  return (*pAllocator)->Init(pCreateInfo);
    13269 }
    13270 
    13271 void vmaDestroyAllocator(
    13272  VmaAllocator allocator)
    13273 {
    13274  if(allocator != VK_NULL_HANDLE)
    13275  {
    13276  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13277  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13278  vma_delete(&allocationCallbacks, allocator);
    13279  }
    13280 }
    13281 
    13283  VmaAllocator allocator,
    13284  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13285 {
    13286  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13287  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13288 }
    13289 
    13291  VmaAllocator allocator,
    13292  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13293 {
    13294  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13295  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13296 }
    13297 
    13299  VmaAllocator allocator,
    13300  uint32_t memoryTypeIndex,
    13301  VkMemoryPropertyFlags* pFlags)
    13302 {
    13303  VMA_ASSERT(allocator && pFlags);
    13304  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13305  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13306 }
    13307 
    13309  VmaAllocator allocator,
    13310  uint32_t frameIndex)
    13311 {
    13312  VMA_ASSERT(allocator);
    13313  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13314 
    13315  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13316 
    13317  allocator->SetCurrentFrameIndex(frameIndex);
    13318 }
    13319 
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    // Fills *pStats with statistics aggregated over the allocator's memory
    // types, heaps and pools.
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13328 
    13329 #if VMA_STATS_STRING_ENABLED
    13330 
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    // Builds a JSON statistics report into a newly allocated, null-terminated
    // string returned via *ppStatsString. The caller must release it with
    // vmaFreeStatsString. When detailedMap is VK_TRUE, the full allocation map
    // is appended via PrintDetailedMap.
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope so the JSON writer finishes (and flushes into sb) before the
        // string is copied out below.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // Totals over all heaps/types.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap: size, flags, stats, and nested objects
        // for each memory type belonging to that heap.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only emitted when the heap has any blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                // Only types that live in this heap.
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Spell out the property flags as readable strings.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a caller-owned, null-terminated buffer.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13438 
    13439 void vmaFreeStatsString(
    13440  VmaAllocator allocator,
    13441  char* pStatsString)
    13442 {
    13443  if(pStatsString != VMA_NULL)
    13444  {
    13445  VMA_ASSERT(allocator);
    13446  size_t len = strlen(pStatsString);
    13447  vma_delete_array(allocator, pStatsString, len + 1);
    13448  }
    13449 }
    13450 
    13451 #endif // #if VMA_STATS_STRING_ENABLED
    13452 
    13453 /*
    13454 This function is not protected by any mutex because it just reads immutable data.
    13455 */
    13456 VkResult vmaFindMemoryTypeIndex(
    13457  VmaAllocator allocator,
    13458  uint32_t memoryTypeBits,
    13459  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13460  uint32_t* pMemoryTypeIndex)
    13461 {
    13462  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13463  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13464  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13465 
    13466  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13467  {
    13468  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13469  }
    13470 
    13471  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13472  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13473 
    13474  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13475  if(mapped)
    13476  {
    13477  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13478  }
    13479 
    13480  // Convert usage to requiredFlags and preferredFlags.
    13481  switch(pAllocationCreateInfo->usage)
    13482  {
    13484  break;
    13486  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13487  {
    13488  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13489  }
    13490  break;
    13492  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13493  break;
    13495  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13496  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13497  {
    13498  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13499  }
    13500  break;
    13502  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13503  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13504  break;
    13505  default:
    13506  break;
    13507  }
    13508 
    13509  *pMemoryTypeIndex = UINT32_MAX;
    13510  uint32_t minCost = UINT32_MAX;
    13511  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13512  memTypeIndex < allocator->GetMemoryTypeCount();
    13513  ++memTypeIndex, memTypeBit <<= 1)
    13514  {
    13515  // This memory type is acceptable according to memoryTypeBits bitmask.
    13516  if((memTypeBit & memoryTypeBits) != 0)
    13517  {
    13518  const VkMemoryPropertyFlags currFlags =
    13519  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13520  // This memory type contains requiredFlags.
    13521  if((requiredFlags & ~currFlags) == 0)
    13522  {
    13523  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13524  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13525  // Remember memory type with lowest cost.
    13526  if(currCost < minCost)
    13527  {
    13528  *pMemoryTypeIndex = memTypeIndex;
    13529  if(currCost == 0)
    13530  {
    13531  return VK_SUCCESS;
    13532  }
    13533  minCost = currCost;
    13534  }
    13535  }
    13536  }
    13537  }
    13538  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13539 }
    13540 
    13542  VmaAllocator allocator,
    13543  const VkBufferCreateInfo* pBufferCreateInfo,
    13544  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13545  uint32_t* pMemoryTypeIndex)
    13546 {
    13547  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13548  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13549  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13550  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13551 
    13552  const VkDevice hDev = allocator->m_hDevice;
    13553  VkBuffer hBuffer = VK_NULL_HANDLE;
    13554  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13555  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13556  if(res == VK_SUCCESS)
    13557  {
    13558  VkMemoryRequirements memReq = {};
    13559  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13560  hDev, hBuffer, &memReq);
    13561 
    13562  res = vmaFindMemoryTypeIndex(
    13563  allocator,
    13564  memReq.memoryTypeBits,
    13565  pAllocationCreateInfo,
    13566  pMemoryTypeIndex);
    13567 
    13568  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13569  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13570  }
    13571  return res;
    13572 }
    13573 
    13575  VmaAllocator allocator,
    13576  const VkImageCreateInfo* pImageCreateInfo,
    13577  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13578  uint32_t* pMemoryTypeIndex)
    13579 {
    13580  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13581  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13582  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13583  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13584 
    13585  const VkDevice hDev = allocator->m_hDevice;
    13586  VkImage hImage = VK_NULL_HANDLE;
    13587  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13588  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13589  if(res == VK_SUCCESS)
    13590  {
    13591  VkMemoryRequirements memReq = {};
    13592  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13593  hDev, hImage, &memReq);
    13594 
    13595  res = vmaFindMemoryTypeIndex(
    13596  allocator,
    13597  memReq.memoryTypeBits,
    13598  pAllocationCreateInfo,
    13599  pMemoryTypeIndex);
    13600 
    13601  allocator->GetVulkanFunctions().vkDestroyImage(
    13602  hDev, hImage, allocator->GetAllocationCallbacks());
    13603  }
    13604  return res;
    13605 }
    13606 
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    // Creates a custom memory pool and returns it via *pPool.
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    // NOTE(review): the call is recorded even when CreatePool failed, so the
    // recorder sees *pPool as written by CreatePool in that case - confirm it
    // tolerates a null/unchanged handle before relying on the recording.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
    13629 
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    // Destroys a custom memory pool. Passing VK_NULL_HANDLE is a no-op.
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before destroying so the recorder still sees a valid handle.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
    13654 
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    // Fills *pPoolStats with statistics for a single custom pool.
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
    13666 
    13668  VmaAllocator allocator,
    13669  VmaPool pool,
    13670  size_t* pLostAllocationCount)
    13671 {
    13672  VMA_ASSERT(allocator && pool);
    13673 
    13674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13675 
    13676 #if VMA_RECORDING_ENABLED
    13677  if(allocator->GetRecorder() != VMA_NULL)
    13678  {
    13679  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13680  }
    13681 #endif
    13682 
    13683  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13684 }
    13685 
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    // Validates corruption-detection margins of all allocations in the pool.
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}
    13696 
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    // General-purpose allocation from explicit VkMemoryRequirements. Since no
    // buffer/image is supplied, dedicated-allocation hints are disabled and
    // the suballocation type is unknown.
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // Optionally return allocation details, only on success.
    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13738 
    13740  VmaAllocator allocator,
    13741  VkBuffer buffer,
    13742  const VmaAllocationCreateInfo* pCreateInfo,
    13743  VmaAllocation* pAllocation,
    13744  VmaAllocationInfo* pAllocationInfo)
    13745 {
    13746  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13747 
    13748  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13749 
    13750  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13751 
    13752  VkMemoryRequirements vkMemReq = {};
    13753  bool requiresDedicatedAllocation = false;
    13754  bool prefersDedicatedAllocation = false;
    13755  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    13756  requiresDedicatedAllocation,
    13757  prefersDedicatedAllocation);
    13758 
    13759  VkResult result = allocator->AllocateMemory(
    13760  vkMemReq,
    13761  requiresDedicatedAllocation,
    13762  prefersDedicatedAllocation,
    13763  buffer, // dedicatedBuffer
    13764  VK_NULL_HANDLE, // dedicatedImage
    13765  *pCreateInfo,
    13766  VMA_SUBALLOCATION_TYPE_BUFFER,
    13767  pAllocation);
    13768 
    13769 #if VMA_RECORDING_ENABLED
    13770  if(allocator->GetRecorder() != VMA_NULL)
    13771  {
    13772  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    13773  allocator->GetCurrentFrameIndex(),
    13774  vkMemReq,
    13775  requiresDedicatedAllocation,
    13776  prefersDedicatedAllocation,
    13777  *pCreateInfo,
    13778  *pAllocation);
    13779  }
    13780 #endif
    13781 
    13782  if(pAllocationInfo && result == VK_SUCCESS)
    13783  {
    13784  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13785  }
    13786 
    13787  return result;
    13788 }
    13789 
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    // Allocates memory suitable for the given image. Queries the image's
    // memory requirements (including dedicated-allocation preference) and
    // passes the image as a dedicated-allocation candidate.
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, // tiling/layout not known here
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // Optionally return allocation details, only on success.
    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13839 
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    // Frees memory previously allocated by any vmaAllocateMemory* function.
    // Passing VK_NULL_HANDLE is a no-op.
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before freeing so the recorder still sees a valid handle.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->FreeMemory(allocation);
}
    13866 
    13868  VmaAllocator allocator,
    13869  VmaAllocation allocation,
    13870  VmaAllocationInfo* pAllocationInfo)
    13871 {
    13872  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    13873 
    13874  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13875 
    13876 #if VMA_RECORDING_ENABLED
    13877  if(allocator->GetRecorder() != VMA_NULL)
    13878  {
    13879  allocator->GetRecorder()->RecordGetAllocationInfo(
    13880  allocator->GetCurrentFrameIndex(),
    13881  allocation);
    13882  }
    13883 #endif
    13884 
    13885  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    13886 }
    13887 
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    // Touches the allocation (updates its last-use frame index) and returns
    // whether it is still valid, i.e. not lost.
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordTouchAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return allocator->TouchAllocation(allocation);
}
    13907 
    13909  VmaAllocator allocator,
    13910  VmaAllocation allocation,
    13911  void* pUserData)
    13912 {
    13913  VMA_ASSERT(allocator && allocation);
    13914 
    13915  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13916 
    13917  allocation->SetUserData(allocator, pUserData);
    13918 
    13919 #if VMA_RECORDING_ENABLED
    13920  if(allocator->GetRecorder() != VMA_NULL)
    13921  {
    13922  allocator->GetRecorder()->RecordSetAllocationUserData(
    13923  allocator->GetCurrentFrameIndex(),
    13924  allocation,
    13925  pUserData);
    13926  }
    13927 #endif
    13928 }
    13929 
    13931  VmaAllocator allocator,
    13932  VmaAllocation* pAllocation)
    13933 {
    13934  VMA_ASSERT(allocator && pAllocation);
    13935 
    13936  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    13937 
    13938  allocator->CreateLostAllocation(pAllocation);
    13939 
    13940 #if VMA_RECORDING_ENABLED
    13941  if(allocator->GetRecorder() != VMA_NULL)
    13942  {
    13943  allocator->GetRecorder()->RecordCreateLostAllocation(
    13944  allocator->GetCurrentFrameIndex(),
    13945  *pAllocation);
    13946  }
    13947 #endif
    13948 }
    13949 
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    // Maps the allocation and returns a pointer to its beginning in *ppData.
    // Mapping is reference-counted internally; pair with vmaUnmapMemory.
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->Map(allocation, ppData);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
    13972 
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    // Decrements the allocation's map reference count; counterpart of
    // vmaMapMemory.
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordUnmapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->Unmap(allocation);
}
    13992 
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    // Flushes the given range of the allocation (host writes -> device).
    // offset/size are relative to the allocation; size may be VK_WHOLE_SIZE.
    // A no-op for host-coherent memory types.
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFlushAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    14012 
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    // Invalidates the given range of the allocation (device writes -> host).
    // offset/size are relative to the allocation; size may be VK_WHOLE_SIZE.
    // A no-op for host-coherent memory types.
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordInvalidateAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    14032 
    14033 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14034 {
    14035  VMA_ASSERT(allocator);
    14036 
    14037  VMA_DEBUG_LOG("vmaCheckCorruption");
    14038 
    14039  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14040 
    14041  return allocator->CheckCorruption(memoryTypeBits);
    14042 }
    14043 
    14044 VkResult vmaDefragment(
    14045  VmaAllocator allocator,
    14046  VmaAllocation* pAllocations,
    14047  size_t allocationCount,
    14048  VkBool32* pAllocationsChanged,
    14049  const VmaDefragmentationInfo *pDefragmentationInfo,
    14050  VmaDefragmentationStats* pDefragmentationStats)
    14051 {
    14052  VMA_ASSERT(allocator && pAllocations);
    14053 
    14054  VMA_DEBUG_LOG("vmaDefragment");
    14055 
    14056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14057 
    14058  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14059 }
    14060 
    14061 VkResult vmaBindBufferMemory(
    14062  VmaAllocator allocator,
    14063  VmaAllocation allocation,
    14064  VkBuffer buffer)
    14065 {
    14066  VMA_ASSERT(allocator && allocation && buffer);
    14067 
    14068  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14069 
    14070  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14071 
    14072  return allocator->BindBufferMemory(allocation, buffer);
    14073 }
    14074 
    14075 VkResult vmaBindImageMemory(
    14076  VmaAllocator allocator,
    14077  VmaAllocation allocation,
    14078  VkImage image)
    14079 {
    14080  VMA_ASSERT(allocator && allocation && image);
    14081 
    14082  VMA_DEBUG_LOG("vmaBindImageMemory");
    14083 
    14084  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14085 
    14086  return allocator->BindImageMemory(allocation, image);
    14087 }
    14088 
    14089 VkResult vmaCreateBuffer(
    14090  VmaAllocator allocator,
    14091  const VkBufferCreateInfo* pBufferCreateInfo,
    14092  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14093  VkBuffer* pBuffer,
    14094  VmaAllocation* pAllocation,
    14095  VmaAllocationInfo* pAllocationInfo)
    14096 {
    14097  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14098 
    14099  VMA_DEBUG_LOG("vmaCreateBuffer");
    14100 
    14101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14102 
    14103  *pBuffer = VK_NULL_HANDLE;
    14104  *pAllocation = VK_NULL_HANDLE;
    14105 
    14106  // 1. Create VkBuffer.
    14107  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14108  allocator->m_hDevice,
    14109  pBufferCreateInfo,
    14110  allocator->GetAllocationCallbacks(),
    14111  pBuffer);
    14112  if(res >= 0)
    14113  {
    14114  // 2. vkGetBufferMemoryRequirements.
    14115  VkMemoryRequirements vkMemReq = {};
    14116  bool requiresDedicatedAllocation = false;
    14117  bool prefersDedicatedAllocation = false;
    14118  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14119  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14120 
    14121  // Make sure alignment requirements for specific buffer usages reported
    14122  // in Physical Device Properties are included in alignment reported by memory requirements.
    14123  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14124  {
    14125  VMA_ASSERT(vkMemReq.alignment %
    14126  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14127  }
    14128  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14129  {
    14130  VMA_ASSERT(vkMemReq.alignment %
    14131  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14132  }
    14133  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14134  {
    14135  VMA_ASSERT(vkMemReq.alignment %
    14136  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14137  }
    14138 
    14139  // 3. Allocate memory using allocator.
    14140  res = allocator->AllocateMemory(
    14141  vkMemReq,
    14142  requiresDedicatedAllocation,
    14143  prefersDedicatedAllocation,
    14144  *pBuffer, // dedicatedBuffer
    14145  VK_NULL_HANDLE, // dedicatedImage
    14146  *pAllocationCreateInfo,
    14147  VMA_SUBALLOCATION_TYPE_BUFFER,
    14148  pAllocation);
    14149 
    14150 #if VMA_RECORDING_ENABLED
    14151  if(allocator->GetRecorder() != VMA_NULL)
    14152  {
    14153  allocator->GetRecorder()->RecordCreateBuffer(
    14154  allocator->GetCurrentFrameIndex(),
    14155  *pBufferCreateInfo,
    14156  *pAllocationCreateInfo,
    14157  *pAllocation);
    14158  }
    14159 #endif
    14160 
    14161  if(res >= 0)
    14162  {
    14163  // 3. Bind buffer with memory.
    14164  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14165  if(res >= 0)
    14166  {
    14167  // All steps succeeded.
    14168  #if VMA_STATS_STRING_ENABLED
    14169  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14170  #endif
    14171  if(pAllocationInfo != VMA_NULL)
    14172  {
    14173  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14174  }
    14175 
    14176  return VK_SUCCESS;
    14177  }
    14178  allocator->FreeMemory(*pAllocation);
    14179  *pAllocation = VK_NULL_HANDLE;
    14180  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14181  *pBuffer = VK_NULL_HANDLE;
    14182  return res;
    14183  }
    14184  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14185  *pBuffer = VK_NULL_HANDLE;
    14186  return res;
    14187  }
    14188  return res;
    14189 }
    14190 
    14191 void vmaDestroyBuffer(
    14192  VmaAllocator allocator,
    14193  VkBuffer buffer,
    14194  VmaAllocation allocation)
    14195 {
    14196  VMA_ASSERT(allocator);
    14197 
    14198  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14199  {
    14200  return;
    14201  }
    14202 
    14203  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14204 
    14205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14206 
    14207 #if VMA_RECORDING_ENABLED
    14208  if(allocator->GetRecorder() != VMA_NULL)
    14209  {
    14210  allocator->GetRecorder()->RecordDestroyBuffer(
    14211  allocator->GetCurrentFrameIndex(),
    14212  allocation);
    14213  }
    14214 #endif
    14215 
    14216  if(buffer != VK_NULL_HANDLE)
    14217  {
    14218  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14219  }
    14220 
    14221  if(allocation != VK_NULL_HANDLE)
    14222  {
    14223  allocator->FreeMemory(allocation);
    14224  }
    14225 }
    14226 
    14227 VkResult vmaCreateImage(
    14228  VmaAllocator allocator,
    14229  const VkImageCreateInfo* pImageCreateInfo,
    14230  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14231  VkImage* pImage,
    14232  VmaAllocation* pAllocation,
    14233  VmaAllocationInfo* pAllocationInfo)
    14234 {
    14235  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14236 
    14237  VMA_DEBUG_LOG("vmaCreateImage");
    14238 
    14239  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14240 
    14241  *pImage = VK_NULL_HANDLE;
    14242  *pAllocation = VK_NULL_HANDLE;
    14243 
    14244  // 1. Create VkImage.
    14245  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14246  allocator->m_hDevice,
    14247  pImageCreateInfo,
    14248  allocator->GetAllocationCallbacks(),
    14249  pImage);
    14250  if(res >= 0)
    14251  {
    14252  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14253  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14254  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14255 
    14256  // 2. Allocate memory using allocator.
    14257  VkMemoryRequirements vkMemReq = {};
    14258  bool requiresDedicatedAllocation = false;
    14259  bool prefersDedicatedAllocation = false;
    14260  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14261  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14262 
    14263  res = allocator->AllocateMemory(
    14264  vkMemReq,
    14265  requiresDedicatedAllocation,
    14266  prefersDedicatedAllocation,
    14267  VK_NULL_HANDLE, // dedicatedBuffer
    14268  *pImage, // dedicatedImage
    14269  *pAllocationCreateInfo,
    14270  suballocType,
    14271  pAllocation);
    14272 
    14273 #if VMA_RECORDING_ENABLED
    14274  if(allocator->GetRecorder() != VMA_NULL)
    14275  {
    14276  allocator->GetRecorder()->RecordCreateImage(
    14277  allocator->GetCurrentFrameIndex(),
    14278  *pImageCreateInfo,
    14279  *pAllocationCreateInfo,
    14280  *pAllocation);
    14281  }
    14282 #endif
    14283 
    14284  if(res >= 0)
    14285  {
    14286  // 3. Bind image with memory.
    14287  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14288  if(res >= 0)
    14289  {
    14290  // All steps succeeded.
    14291  #if VMA_STATS_STRING_ENABLED
    14292  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14293  #endif
    14294  if(pAllocationInfo != VMA_NULL)
    14295  {
    14296  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14297  }
    14298 
    14299  return VK_SUCCESS;
    14300  }
    14301  allocator->FreeMemory(*pAllocation);
    14302  *pAllocation = VK_NULL_HANDLE;
    14303  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14304  *pImage = VK_NULL_HANDLE;
    14305  return res;
    14306  }
    14307  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14308  *pImage = VK_NULL_HANDLE;
    14309  return res;
    14310  }
    14311  return res;
    14312 }
    14313 
    14314 void vmaDestroyImage(
    14315  VmaAllocator allocator,
    14316  VkImage image,
    14317  VmaAllocation allocation)
    14318 {
    14319  VMA_ASSERT(allocator);
    14320 
    14321  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14322  {
    14323  return;
    14324  }
    14325 
    14326  VMA_DEBUG_LOG("vmaDestroyImage");
    14327 
    14328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14329 
    14330 #if VMA_RECORDING_ENABLED
    14331  if(allocator->GetRecorder() != VMA_NULL)
    14332  {
    14333  allocator->GetRecorder()->RecordDestroyImage(
    14334  allocator->GetCurrentFrameIndex(),
    14335  allocation);
    14336  }
    14337 #endif
    14338 
    14339  if(image != VK_NULL_HANDLE)
    14340  {
    14341  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14342  }
    14343  if(allocation != VK_NULL_HANDLE)
    14344  {
    14345  allocator->FreeMemory(allocation);
    14346  }
    14347 }
    14348 
    14349 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1575
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1876
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1628
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1632
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    -
    Definition: vk_mem_alloc.h:1602
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2194
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1583
    +
    Definition: vk_mem_alloc.h:1606
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2198
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1587
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:1829
    -
    Definition: vk_mem_alloc.h:1932
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1575
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2294
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1625
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2539
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2083
    -
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1472
    +
    Definition: vk_mem_alloc.h:1833
    +
    Definition: vk_mem_alloc.h:1936
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1579
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2298
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1629
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2543
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2087
    +
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1476
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2175
    -
    Definition: vk_mem_alloc.h:1909
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1564
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1982
    -
    Definition: vk_mem_alloc.h:1856
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1637
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2111
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2179
    +
    Definition: vk_mem_alloc.h:1913
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1568
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1986
    +
    Definition: vk_mem_alloc.h:1860
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1641
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2115
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1690
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1622
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1694
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1626
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1860
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1864
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1762
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1580
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1761
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2543
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1766
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1584
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1765
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2547
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1654
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1771
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2551
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1966
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2534
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1581
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1506
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1658
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1775
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2555
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1970
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2538
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1585
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1510
    Represents main object of this library initialized.
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1631
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1635
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2125
    -
    Definition: vk_mem_alloc.h:2119
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1697
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2304
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2129
    +
    Definition: vk_mem_alloc.h:2123
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1701
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2308
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1576
    -
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1600
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2003
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2145
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2181
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1580
    +
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1604
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2007
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2149
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2185
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:1562
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2128
    +
    Definition: vk_mem_alloc.h:1566
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2132
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1807
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1811
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2529
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2533
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2547
    -
    Definition: vk_mem_alloc.h:1846
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:1990
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1579
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2551
    +
    Definition: vk_mem_alloc.h:1850
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:1994
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1583
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1767
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1512
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1771
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1516
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    - +
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1533
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1537
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1604
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1538
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2549
    +
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1608
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1542
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2553
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1977
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2191
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1981
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2195
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1572
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1750
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2140
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1525
    -
    Definition: vk_mem_alloc.h:2115
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1576
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1754
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2144
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1529
    +
    Definition: vk_mem_alloc.h:2119
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1916
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1763
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1529
    -
    Definition: vk_mem_alloc.h:1940
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2131
    -
    Definition: vk_mem_alloc.h:1855
    -
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1578
    +
    Definition: vk_mem_alloc.h:1920
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1767
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1533
    +
    Definition: vk_mem_alloc.h:1944
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2135
    +
    Definition: vk_mem_alloc.h:1859
    +
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1582
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1972
    -
    Definition: vk_mem_alloc.h:1963
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1976
    +
    Definition: vk_mem_alloc.h:1967
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1753
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1574
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2153
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1640
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2184
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1961
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:1996
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1757
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1578
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2157
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1644
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2188
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1965
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2000
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1678
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1769
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1896
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1762
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1682
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1773
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1900
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1766
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1585
    -
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1610
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1527
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1584
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1589
    +
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1614
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1531
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1588
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2167
    -
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1577
    -
    Definition: vk_mem_alloc.h:1927
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2171
    +
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1581
    +
    Definition: vk_mem_alloc.h:1931
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    -
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1618
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2318
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1634
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1762
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1759
    +
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1622
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2322
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1638
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1766
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1763
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2172
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2176
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions...
    -
    Definition: vk_mem_alloc.h:1936
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2299
    -
    Definition: vk_mem_alloc.h:1947
    -
    Definition: vk_mem_alloc.h:1959
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2545
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1570
    +
    Definition: vk_mem_alloc.h:1940
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2303
    +
    Definition: vk_mem_alloc.h:1951
    +
    Definition: vk_mem_alloc.h:1963
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2549
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1574
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1757
    -
    Definition: vk_mem_alloc.h:1812
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2121
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1761
    +
    Definition: vk_mem_alloc.h:1816
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2125
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1607
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1755
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1582
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1586
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1883
    -
    Definition: vk_mem_alloc.h:1954
    -
    Definition: vk_mem_alloc.h:1839
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2313
    +
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1611
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1759
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1586
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1590
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1887
    +
    Definition: vk_mem_alloc.h:1958
    +
    Definition: vk_mem_alloc.h:1843
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2317
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1560
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1564
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1573
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2100
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2280
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1577
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2104
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2284
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1944
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2065
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1763
    +
    Definition: vk_mem_alloc.h:1948
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2069
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1767
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
    - -
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1594
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1770
    + +
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1598
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1774
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2178
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1763
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2182
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1767
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2285
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2289
    diff --git a/docs/html/search/variables_c.html b/docs/html/search/variables_c.html new file mode 100644 index 0000000..75709df --- /dev/null +++ b/docs/html/search/variables_c.html @@ -0,0 +1,30 @@ + + + + + + + + + +
    +
    Loading...
    +
    + +
    Searching...
    +
    No Matches
    + +
    + + diff --git a/docs/html/search/variables_c.js b/docs/html/search/variables_c.js new file mode 100644 index 0000000..e7b9e7f --- /dev/null +++ b/docs/html/search/variables_c.js @@ -0,0 +1,20 @@ +var searchData= +[ + ['vkallocatememory',['vkAllocateMemory',['../struct_vma_vulkan_functions.html#a2943bf99dfd784a0e8f599d987e22e6c',1,'VmaVulkanFunctions']]], + ['vkbindbuffermemory',['vkBindBufferMemory',['../struct_vma_vulkan_functions.html#a94fc4f3a605d9880bb3c0ba2c2fc80b2',1,'VmaVulkanFunctions']]], + ['vkbindimagememory',['vkBindImageMemory',['../struct_vma_vulkan_functions.html#a1338d96a128a5ade648b8d934907c637',1,'VmaVulkanFunctions']]], + ['vkcmdcopybuffer',['vkCmdCopyBuffer',['../struct_vma_vulkan_functions.html#ae5c0db8c89a3b82593dc16aa6a49fa3a',1,'VmaVulkanFunctions']]], + ['vkcreatebuffer',['vkCreateBuffer',['../struct_vma_vulkan_functions.html#ae8084315a25006271a2edfc3a447519f',1,'VmaVulkanFunctions']]], + ['vkcreateimage',['vkCreateImage',['../struct_vma_vulkan_functions.html#a23ebe70be515b9b5010a1d691200e325',1,'VmaVulkanFunctions']]], + ['vkdestroybuffer',['vkDestroyBuffer',['../struct_vma_vulkan_functions.html#a7e054606faddb07f0e8556f3ed317d45',1,'VmaVulkanFunctions']]], + ['vkdestroyimage',['vkDestroyImage',['../struct_vma_vulkan_functions.html#a90b898227039b1dcb3520f6e91f09ffa',1,'VmaVulkanFunctions']]], + ['vkflushmappedmemoryranges',['vkFlushMappedMemoryRanges',['../struct_vma_vulkan_functions.html#a33c322f4c4ad2810f8a9c97a277572f9',1,'VmaVulkanFunctions']]], + ['vkfreememory',['vkFreeMemory',['../struct_vma_vulkan_functions.html#a4c658701778564d62034255b5dda91b4',1,'VmaVulkanFunctions']]], + ['vkgetbuffermemoryrequirements',['vkGetBufferMemoryRequirements',['../struct_vma_vulkan_functions.html#a5b92901df89a4194b0d12f6071d4d143',1,'VmaVulkanFunctions']]], + ['vkgetimagememoryrequirements',['vkGetImageMemoryRequirements',['../struct_vma_vulkan_functions.html#a475f6f49f8debe4d10800592606d53f4',1,'VmaVulkanFunctions']]], + 
['vkgetphysicaldevicememoryproperties',['vkGetPhysicalDeviceMemoryProperties',['../struct_vma_vulkan_functions.html#a60d25c33bba06bb8592e6875cbaa9830',1,'VmaVulkanFunctions']]], + ['vkgetphysicaldeviceproperties',['vkGetPhysicalDeviceProperties',['../struct_vma_vulkan_functions.html#a77b7a74082823e865dd6546623468f96',1,'VmaVulkanFunctions']]], + ['vkinvalidatemappedmemoryranges',['vkInvalidateMappedMemoryRanges',['../struct_vma_vulkan_functions.html#a5c1093bc32386a8060c37c9f282078a1',1,'VmaVulkanFunctions']]], + ['vkmapmemory',['vkMapMemory',['../struct_vma_vulkan_functions.html#ab5c1f38dea3a2cf00dc9eb4f57218c49',1,'VmaVulkanFunctions']]], + ['vkunmapmemory',['vkUnmapMemory',['../struct_vma_vulkan_functions.html#acc798589736f0becb317fc2196c1d8b9',1,'VmaVulkanFunctions']]] +]; diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index d3e0ccf..b5e44ad 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,188 +65,188 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1468 /*
    1469 Define this macro to 0/1 to disable/enable support for recording functionality,
    1470 available through VmaAllocatorCreateInfo::pRecordSettings.
    1471 */
    1472 #ifndef VMA_RECORDING_ENABLED
    1473  #ifdef _WIN32
    1474  #define VMA_RECORDING_ENABLED 1
    1475  #else
    1476  #define VMA_RECORDING_ENABLED 0
    1477  #endif
    1478 #endif
    1479 
    1480 #ifndef NOMINMAX
    1481  #define NOMINMAX // For windows.h
    1482 #endif
    1483 
    1484 #include <vulkan/vulkan.h>
    1485 
    1486 #if VMA_RECORDING_ENABLED
    1487  #include <windows.h>
    1488 #endif
    1489 
    1490 #if !defined(VMA_DEDICATED_ALLOCATION)
    1491  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1492  #define VMA_DEDICATED_ALLOCATION 1
    1493  #else
    1494  #define VMA_DEDICATED_ALLOCATION 0
    1495  #endif
    1496 #endif
    1497 
    1507 VK_DEFINE_HANDLE(VmaAllocator)
    1508 
    1509 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1511  VmaAllocator allocator,
    1512  uint32_t memoryType,
    1513  VkDeviceMemory memory,
    1514  VkDeviceSize size);
    1516 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1517  VmaAllocator allocator,
    1518  uint32_t memoryType,
    1519  VkDeviceMemory memory,
    1520  VkDeviceSize size);
    1521 
    1535 
    1565 
    1568 typedef VkFlags VmaAllocatorCreateFlags;
    1569 
    1574 typedef struct VmaVulkanFunctions {
    1575  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1576  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1577  PFN_vkAllocateMemory vkAllocateMemory;
    1578  PFN_vkFreeMemory vkFreeMemory;
    1579  PFN_vkMapMemory vkMapMemory;
    1580  PFN_vkUnmapMemory vkUnmapMemory;
    1581  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1582  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1583  PFN_vkBindBufferMemory vkBindBufferMemory;
    1584  PFN_vkBindImageMemory vkBindImageMemory;
    1585  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1586  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1587  PFN_vkCreateBuffer vkCreateBuffer;
    1588  PFN_vkDestroyBuffer vkDestroyBuffer;
    1589  PFN_vkCreateImage vkCreateImage;
    1590  PFN_vkDestroyImage vkDestroyImage;
    1591 #if VMA_DEDICATED_ALLOCATION
    1592  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1593  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1594 #endif
    1596 
    1598 typedef enum VmaRecordFlagBits {
    1605 
    1608 typedef VkFlags VmaRecordFlags;
    1609 
    1611 typedef struct VmaRecordSettings
    1612 {
    1622  const char* pFilePath;
    1624 
    1627 {
    1631 
    1632  VkPhysicalDevice physicalDevice;
    1634 
    1635  VkDevice device;
    1637 
    1640 
    1641  const VkAllocationCallbacks* pAllocationCallbacks;
    1643 
    1682  const VkDeviceSize* pHeapSizeLimit;
    1703 
    1705 VkResult vmaCreateAllocator(
    1706  const VmaAllocatorCreateInfo* pCreateInfo,
    1707  VmaAllocator* pAllocator);
    1708 
    1710 void vmaDestroyAllocator(
    1711  VmaAllocator allocator);
    1712 
    1718  VmaAllocator allocator,
    1719  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1720 
    1726  VmaAllocator allocator,
    1727  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1728 
    1736  VmaAllocator allocator,
    1737  uint32_t memoryTypeIndex,
    1738  VkMemoryPropertyFlags* pFlags);
    1739 
    1749  VmaAllocator allocator,
    1750  uint32_t frameIndex);
    1751 
    1754 typedef struct VmaStatInfo
    1755 {
    1757  uint32_t blockCount;
    1763  VkDeviceSize usedBytes;
    1765  VkDeviceSize unusedBytes;
    1768 } VmaStatInfo;
    1769 
    1771 typedef struct VmaStats
    1772 {
    1773  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1774  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1776 } VmaStats;
    1777 
    1779 void vmaCalculateStats(
    1780  VmaAllocator allocator,
    1781  VmaStats* pStats);
    1782 
    1783 #define VMA_STATS_STRING_ENABLED 1
    1784 
    1785 #if VMA_STATS_STRING_ENABLED
    1786 
    1788 
    1790 void vmaBuildStatsString(
    1791  VmaAllocator allocator,
    1792  char** ppStatsString,
    1793  VkBool32 detailedMap);
    1794 
    1795 void vmaFreeStatsString(
    1796  VmaAllocator allocator,
    1797  char* pStatsString);
    1798 
    1799 #endif // #if VMA_STATS_STRING_ENABLED
    1800 
    1809 VK_DEFINE_HANDLE(VmaPool)
    1810 
    1811 typedef enum VmaMemoryUsage
    1812 {
    1861 } VmaMemoryUsage;
    1862 
    1877 
    1932 
    1945 
    1955 
    1962 
    1966 
    1968 {
    1981  VkMemoryPropertyFlags requiredFlags;
    1986  VkMemoryPropertyFlags preferredFlags;
    1994  uint32_t memoryTypeBits;
    2007  void* pUserData;
    2009 
    2026 VkResult vmaFindMemoryTypeIndex(
    2027  VmaAllocator allocator,
    2028  uint32_t memoryTypeBits,
    2029  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2030  uint32_t* pMemoryTypeIndex);
    2031 
    2045  VmaAllocator allocator,
    2046  const VkBufferCreateInfo* pBufferCreateInfo,
    2047  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2048  uint32_t* pMemoryTypeIndex);
    2049 
    2063  VmaAllocator allocator,
    2064  const VkImageCreateInfo* pImageCreateInfo,
    2065  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2066  uint32_t* pMemoryTypeIndex);
    2067 
    2088 
    2105 
    2116 
    2122 
    2125 typedef VkFlags VmaPoolCreateFlags;
    2126 
    2129 typedef struct VmaPoolCreateInfo {
    2144  VkDeviceSize blockSize;
    2173 
    2176 typedef struct VmaPoolStats {
    2179  VkDeviceSize size;
    2182  VkDeviceSize unusedSize;
    2195  VkDeviceSize unusedRangeSizeMax;
    2198  size_t blockCount;
    2199 } VmaPoolStats;
    2200 
    2207 VkResult vmaCreatePool(
    2208  VmaAllocator allocator,
    2209  const VmaPoolCreateInfo* pCreateInfo,
    2210  VmaPool* pPool);
    2211 
    2214 void vmaDestroyPool(
    2215  VmaAllocator allocator,
    2216  VmaPool pool);
    2217 
    2224 void vmaGetPoolStats(
    2225  VmaAllocator allocator,
    2226  VmaPool pool,
    2227  VmaPoolStats* pPoolStats);
    2228 
    2236  VmaAllocator allocator,
    2237  VmaPool pool,
    2238  size_t* pLostAllocationCount);
    2239 
    2254 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2255 
    2280 VK_DEFINE_HANDLE(VmaAllocation)
    2281 
    2282 
    2284 typedef struct VmaAllocationInfo {
    2289  uint32_t memoryType;
    2298  VkDeviceMemory deviceMemory;
    2303  VkDeviceSize offset;
    2308  VkDeviceSize size;
    2322  void* pUserData;
    2324 
    2335 VkResult vmaAllocateMemory(
    2336  VmaAllocator allocator,
    2337  const VkMemoryRequirements* pVkMemoryRequirements,
    2338  const VmaAllocationCreateInfo* pCreateInfo,
    2339  VmaAllocation* pAllocation,
    2340  VmaAllocationInfo* pAllocationInfo);
    2341 
    2349  VmaAllocator allocator,
    2350  VkBuffer buffer,
    2351  const VmaAllocationCreateInfo* pCreateInfo,
    2352  VmaAllocation* pAllocation,
    2353  VmaAllocationInfo* pAllocationInfo);
    2354 
    2356 VkResult vmaAllocateMemoryForImage(
    2357  VmaAllocator allocator,
    2358  VkImage image,
    2359  const VmaAllocationCreateInfo* pCreateInfo,
    2360  VmaAllocation* pAllocation,
    2361  VmaAllocationInfo* pAllocationInfo);
    2362 
    2364 void vmaFreeMemory(
    2365  VmaAllocator allocator,
    2366  VmaAllocation allocation);
    2367 
    2385  VmaAllocator allocator,
    2386  VmaAllocation allocation,
    2387  VmaAllocationInfo* pAllocationInfo);
    2388 
    2403 VkBool32 vmaTouchAllocation(
    2404  VmaAllocator allocator,
    2405  VmaAllocation allocation);
    2406 
    2421  VmaAllocator allocator,
    2422  VmaAllocation allocation,
    2423  void* pUserData);
    2424 
    2436  VmaAllocator allocator,
    2437  VmaAllocation* pAllocation);
    2438 
    2473 VkResult vmaMapMemory(
    2474  VmaAllocator allocator,
    2475  VmaAllocation allocation,
    2476  void** ppData);
    2477 
    2482 void vmaUnmapMemory(
    2483  VmaAllocator allocator,
    2484  VmaAllocation allocation);
    2485 
    2498 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2499 
    2512 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2513 
    2530 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2531 
    2533 typedef struct VmaDefragmentationInfo {
    2538  VkDeviceSize maxBytesToMove;
    2545 
    2547 typedef struct VmaDefragmentationStats {
    2549  VkDeviceSize bytesMoved;
    2551  VkDeviceSize bytesFreed;
    2557 
    2596 VkResult vmaDefragment(
    2597  VmaAllocator allocator,
    2598  VmaAllocation* pAllocations,
    2599  size_t allocationCount,
    2600  VkBool32* pAllocationsChanged,
    2601  const VmaDefragmentationInfo *pDefragmentationInfo,
    2602  VmaDefragmentationStats* pDefragmentationStats);
    2603 
    2616 VkResult vmaBindBufferMemory(
    2617  VmaAllocator allocator,
    2618  VmaAllocation allocation,
    2619  VkBuffer buffer);
    2620 
    2633 VkResult vmaBindImageMemory(
    2634  VmaAllocator allocator,
    2635  VmaAllocation allocation,
    2636  VkImage image);
    2637 
    2664 VkResult vmaCreateBuffer(
    2665  VmaAllocator allocator,
    2666  const VkBufferCreateInfo* pBufferCreateInfo,
    2667  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2668  VkBuffer* pBuffer,
    2669  VmaAllocation* pAllocation,
    2670  VmaAllocationInfo* pAllocationInfo);
    2671 
    2683 void vmaDestroyBuffer(
    2684  VmaAllocator allocator,
    2685  VkBuffer buffer,
    2686  VmaAllocation allocation);
    2687 
    2689 VkResult vmaCreateImage(
    2690  VmaAllocator allocator,
    2691  const VkImageCreateInfo* pImageCreateInfo,
    2692  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2693  VkImage* pImage,
    2694  VmaAllocation* pAllocation,
    2695  VmaAllocationInfo* pAllocationInfo);
    2696 
    2708 void vmaDestroyImage(
    2709  VmaAllocator allocator,
    2710  VkImage image,
    2711  VmaAllocation allocation);
    2712 
    2713 #ifdef __cplusplus
    2714 }
    2715 #endif
    2716 
    2717 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2718 
    2719 // For Visual Studio IntelliSense.
    2720 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2721 #define VMA_IMPLEMENTATION
    2722 #endif
    2723 
    2724 #ifdef VMA_IMPLEMENTATION
    2725 #undef VMA_IMPLEMENTATION
    2726 
    2727 #include <cstdint>
    2728 #include <cstdlib>
    2729 #include <cstring>
    2730 
    2731 /*******************************************************************************
    2732 CONFIGURATION SECTION
    2733 
    2734 Define some of these macros before each #include of this header or change them
    2735 here if you need other then default behavior depending on your environment.
    2736 */
    2737 
    2738 /*
    2739 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2740 internally, like:
    2741 
    2742  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2743 
    2744 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2745 VmaAllocatorCreateInfo::pVulkanFunctions.
    2746 */
    2747 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2748 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2749 #endif
    2750 
    2751 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2752 //#define VMA_USE_STL_CONTAINERS 1
    2753 
    2754 /* Set this macro to 1 to make the library including and using STL containers:
    2755 std::pair, std::vector, std::list, std::unordered_map.
    2756 
    2757 Set it to 0 or undefined to make the library using its own implementation of
    2758 the containers.
    2759 */
    2760 #if VMA_USE_STL_CONTAINERS
    2761  #define VMA_USE_STL_VECTOR 1
    2762  #define VMA_USE_STL_UNORDERED_MAP 1
    2763  #define VMA_USE_STL_LIST 1
    2764 #endif
    2765 
    2766 #if VMA_USE_STL_VECTOR
    2767  #include <vector>
    2768 #endif
    2769 
    2770 #if VMA_USE_STL_UNORDERED_MAP
    2771  #include <unordered_map>
    2772 #endif
    2773 
    2774 #if VMA_USE_STL_LIST
    2775  #include <list>
    2776 #endif
    2777 
    2778 /*
    2779 Following headers are used in this CONFIGURATION section only, so feel free to
    2780 remove them if not needed.
    2781 */
    2782 #include <cassert> // for assert
    2783 #include <algorithm> // for min, max
    2784 #include <mutex> // for std::mutex
    2785 #include <atomic> // for std::atomic
    2786 
    2787 #ifndef VMA_NULL
    2788  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2789  #define VMA_NULL nullptr
    2790 #endif
    2791 
    2792 #if defined(__APPLE__) || defined(__ANDROID__)
    2793 #include <cstdlib>
    2794 void *aligned_alloc(size_t alignment, size_t size)
    2795 {
    2796  // alignment must be >= sizeof(void*)
    2797  if(alignment < sizeof(void*))
    2798  {
    2799  alignment = sizeof(void*);
    2800  }
    2801 
    2802  void *pointer;
    2803  if(posix_memalign(&pointer, alignment, size) == 0)
    2804  return pointer;
    2805  return VMA_NULL;
    2806 }
    2807 #endif
    2808 
    2809 // If your compiler is not compatible with C++11 and definition of
    2810 // aligned_alloc() function is missing, uncommeting following line may help:
    2811 
    2812 //#include <malloc.h>
    2813 
    2814 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2815 #ifndef VMA_ASSERT
    2816  #ifdef _DEBUG
    2817  #define VMA_ASSERT(expr) assert(expr)
    2818  #else
    2819  #define VMA_ASSERT(expr)
    2820  #endif
    2821 #endif
    2822 
    2823 // Assert that will be called very often, like inside data structures e.g. operator[].
    2824 // Making it non-empty can make program slow.
    2825 #ifndef VMA_HEAVY_ASSERT
    2826  #ifdef _DEBUG
    2827  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2828  #else
    2829  #define VMA_HEAVY_ASSERT(expr)
    2830  #endif
    2831 #endif
    2832 
    2833 #ifndef VMA_ALIGN_OF
    2834  #define VMA_ALIGN_OF(type) (__alignof(type))
    2835 #endif
    2836 
    2837 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2838  #if defined(_WIN32)
    2839  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2840  #else
    2841  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2842  #endif
    2843 #endif
    2844 
    2845 #ifndef VMA_SYSTEM_FREE
    2846  #if defined(_WIN32)
    2847  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2848  #else
    2849  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2850  #endif
    2851 #endif
    2852 
    2853 #ifndef VMA_MIN
    2854  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2855 #endif
    2856 
    2857 #ifndef VMA_MAX
    2858  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2859 #endif
    2860 
    2861 #ifndef VMA_SWAP
    2862  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2863 #endif
    2864 
    2865 #ifndef VMA_SORT
    2866  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2867 #endif
    2868 
    2869 #ifndef VMA_DEBUG_LOG
    2870  #define VMA_DEBUG_LOG(format, ...)
    2871  /*
    2872  #define VMA_DEBUG_LOG(format, ...) do { \
    2873  printf(format, __VA_ARGS__); \
    2874  printf("\n"); \
    2875  } while(false)
    2876  */
    2877 #endif
    2878 
// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    // Small formatting helpers compiled only when stats-string support is
    // enabled. Each prints into a caller-provided buffer of strLen bytes.
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif
    2894 
    2895 #ifndef VMA_MUTEX
    2896  class VmaMutex
    2897  {
    2898  public:
    2899  VmaMutex() { }
    2900  ~VmaMutex() { }
    2901  void Lock() { m_Mutex.lock(); }
    2902  void Unlock() { m_Mutex.unlock(); }
    2903  private:
    2904  std::mutex m_Mutex;
    2905  };
    2906  #define VMA_MUTEX VmaMutex
    2907 #endif
    2908 
    2909 /*
    2910 If providing your own implementation, you need to implement a subset of std::atomic:
    2911 
    2912 - Constructor(uint32_t desired)
    2913 - uint32_t load() const
    2914 - void store(uint32_t desired)
    2915 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2916 */
    2917 #ifndef VMA_ATOMIC_UINT32
    2918  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2919 #endif
    2920 
    2921 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2922 
    2926  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2927 #endif
    2928 
    2929 #ifndef VMA_DEBUG_ALIGNMENT
    2930 
    2934  #define VMA_DEBUG_ALIGNMENT (1)
    2935 #endif
    2936 
    2937 #ifndef VMA_DEBUG_MARGIN
    2938 
    2942  #define VMA_DEBUG_MARGIN (0)
    2943 #endif
    2944 
    2945 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2946 
    2950  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2951 #endif
    2952 
    2953 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2954 
    2959  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2960 #endif
    2961 
    2962 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2963 
    2967  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2968 #endif
    2969 
    2970 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2971 
    2975  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2976 #endif
    2977 
    2978 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2979  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2981 #endif
    2982 
    2983 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2984  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2986 #endif
    2987 
    2988 #ifndef VMA_CLASS_NO_COPY
    2989  #define VMA_CLASS_NO_COPY(className) \
    2990  private: \
    2991  className(const className&) = delete; \
    2992  className& operator=(const className&) = delete;
    2993 #endif
    2994 
    2995 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    2996 
    2997 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    2998 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    2999 
    3000 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3001 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3002 
    3003 /*******************************************************************************
    3004 END OF CONFIGURATION
    3005 */
    3006 
    3007 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3008  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3009 
    3010 // Returns number of bits set to 1 in (v).
    3011 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3012 {
    3013  uint32_t c = v - ((v >> 1) & 0x55555555);
    3014  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3015  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3016  c = ((c >> 8) + c) & 0x00FF00FF;
    3017  c = ((c >> 16) + c) & 0x0000FFFF;
    3018  return c;
    3019 }
    3020 
    3021 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3022 // Use types like uint32_t, uint64_t as T.
    3023 template <typename T>
    3024 static inline T VmaAlignUp(T val, T align)
    3025 {
    3026  return (val + align - 1) / align * align;
    3027 }
// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
// (Comment previously said "VmaAlignUp" — copy-paste error in the example.)
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
    3035 
    3036 // Division with mathematical rounding to nearest number.
    3037 template <typename T>
    3038 static inline T VmaRoundDiv(T x, T y)
    3039 {
    3040  return (x + (y / (T)2)) / y;
    3041 }
    3042 
    3043 /*
    3044 Returns true if given number is a power of two.
    3045 T must be unsigned integer number or signed integer but always nonnegative.
    3046 For 0 returns true.
    3047 */
    3048 template <typename T>
    3049 inline bool VmaIsPow2(T x)
    3050 {
    3051  return (x & (x-1)) == 0;
    3052 }
    3053 
    3054 // Returns smallest power of 2 greater or equal to v.
    3055 static inline uint32_t VmaNextPow2(uint32_t v)
    3056 {
    3057  v--;
    3058  v |= v >> 1;
    3059  v |= v >> 2;
    3060  v |= v >> 4;
    3061  v |= v >> 8;
    3062  v |= v >> 16;
    3063  v++;
    3064  return v;
    3065 }
    3066 static inline uint64_t VmaNextPow2(uint64_t v)
    3067 {
    3068  v--;
    3069  v |= v >> 1;
    3070  v |= v >> 2;
    3071  v |= v >> 4;
    3072  v |= v >> 8;
    3073  v |= v >> 16;
    3074  v |= v >> 32;
    3075  v++;
    3076  return v;
    3077 }
    3078 
    3079 // Returns largest power of 2 less or equal to v.
    3080 static inline uint32_t VmaPrevPow2(uint32_t v)
    3081 {
    3082  v |= v >> 1;
    3083  v |= v >> 2;
    3084  v |= v >> 4;
    3085  v |= v >> 8;
    3086  v |= v >> 16;
    3087  v = v ^ (v >> 1);
    3088  return v;
    3089 }
    3090 static inline uint64_t VmaPrevPow2(uint64_t v)
    3091 {
    3092  v |= v >> 1;
    3093  v |= v >> 2;
    3094  v |= v >> 4;
    3095  v |= v >> 8;
    3096  v |= v >> 16;
    3097  v |= v >> 32;
    3098  v = v ^ (v >> 1);
    3099  return v;
    3100 }
    3101 
    3102 static inline bool VmaStrIsEmpty(const char* pStr)
    3103 {
    3104  return pStr == VMA_NULL || *pStr == '\0';
    3105 }
    3106 
    3107 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3108 {
    3109  switch(algorithm)
    3110  {
    3112  return "Linear";
    3114  return "Buddy";
    3115  case 0:
    3116  return "Default";
    3117  default:
    3118  VMA_ASSERT(0);
    3119  return "";
    3120  }
    3121 }
    3122 
    3123 #ifndef VMA_SORT
    3124 
    3125 template<typename Iterator, typename Compare>
    3126 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3127 {
    3128  Iterator centerValue = end; --centerValue;
    3129  Iterator insertIndex = beg;
    3130  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3131  {
    3132  if(cmp(*memTypeIndex, *centerValue))
    3133  {
    3134  if(insertIndex != memTypeIndex)
    3135  {
    3136  VMA_SWAP(*memTypeIndex, *insertIndex);
    3137  }
    3138  ++insertIndex;
    3139  }
    3140  }
    3141  if(insertIndex != centerValue)
    3142  {
    3143  VMA_SWAP(*insertIndex, *centerValue);
    3144  }
    3145  return insertIndex;
    3146 }
    3147 
    3148 template<typename Iterator, typename Compare>
    3149 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3150 {
    3151  if(beg < end)
    3152  {
    3153  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3154  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3155  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3156  }
    3157 }
    3158 
    3159 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3160 
    3161 #endif // #ifndef VMA_SORT
    3162 
    3163 /*
    3164 Returns true if two memory blocks occupy overlapping pages.
    3165 ResourceA must be in less memory offset than ResourceB.
    3166 
    3167 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3168 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3169 */
    3170 static inline bool VmaBlocksOnSamePage(
    3171  VkDeviceSize resourceAOffset,
    3172  VkDeviceSize resourceASize,
    3173  VkDeviceSize resourceBOffset,
    3174  VkDeviceSize pageSize)
    3175 {
    3176  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3177  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3178  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3179  VkDeviceSize resourceBStart = resourceBOffset;
    3180  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3181  return resourceAEndPage == resourceBStartPage;
    3182 }
    3183 
// Category of a suballocation inside a memory block. Consulted by
// VmaIsBufferImageGranularityConflict() to decide whether two neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,            // Range not currently allocated.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,         // Resource type unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,   // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3194 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair so each combination only needs handling once below.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown vs anything: conservatively assume a conflict.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown tiling may be optimal, so it can conflict with anything
        // a linear or optimal image could conflict with.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Optimal vs optimal (the only remaining pairing) never conflicts.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
    3235 
    3236 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3237 {
    3238  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3239  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3240  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3241  {
    3242  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3243  }
    3244 }
    3245 
    3246 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3247 {
    3248  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3249  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3250  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3251  {
    3252  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3253  {
    3254  return false;
    3255  }
    3256  }
    3257  return true;
    3258 }
    3259 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false, no mutex is stored and Lock/Unlock are skipped
    // entirely (caller opts out of synchronization).
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex; // VMA_NULL when locking is disabled.
};
    3285 
    3286 #if VMA_DEBUG_GLOBAL_MUTEX
    3287  static VMA_MUTEX gDebugGlobalMutex;
    3288  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3289 #else
    3290  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3291 #endif
    3292 
    3293 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3294 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3295 
    3296 /*
    3297 Performs binary search and returns iterator to first element that is greater or
    3298 equal to (key), according to comparison (cmp).
    3299 
    3300 Cmp should return true if first argument is less than second argument.
    3301 
    3302 Returned value is the found element, if present in the collection or place where
    3303 new element with value (key) should be inserted.
    3304 */
    3305 template <typename CmpLess, typename IterT, typename KeyT>
    3306 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3307 {
    3308  size_t down = 0, up = (end - beg);
    3309  while(down < up)
    3310  {
    3311  const size_t mid = (down + up) / 2;
    3312  if(cmp(*(beg+mid), key))
    3313  {
    3314  down = mid + 1;
    3315  }
    3316  else
    3317  {
    3318  up = mid;
    3319  }
    3320  }
    3321  return beg + down;
    3322 }
    3323 
    3325 // Memory allocation
    3326 
    3327 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3328 {
    3329  if((pAllocationCallbacks != VMA_NULL) &&
    3330  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3331  {
    3332  return (*pAllocationCallbacks->pfnAllocation)(
    3333  pAllocationCallbacks->pUserData,
    3334  size,
    3335  alignment,
    3336  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3337  }
    3338  else
    3339  {
    3340  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3341  }
    3342 }
    3343 
    3344 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3345 {
    3346  if((pAllocationCallbacks != VMA_NULL) &&
    3347  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3348  {
    3349  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3350  }
    3351  else
    3352  {
    3353  VMA_SYSTEM_FREE(ptr);
    3354  }
    3355 }
    3356 
    3357 template<typename T>
    3358 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3359 {
    3360  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3361 }
    3362 
    3363 template<typename T>
    3364 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3365 {
    3366  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3367 }
    3368 
    3369 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3370 
    3371 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3372 
// Destroys an object created with vma_new and releases its storage through
// the same allocation callbacks.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3379 
    3380 template<typename T>
    3381 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3382 {
    3383  if(ptr != VMA_NULL)
    3384  {
    3385  for(size_t i = count; i--; )
    3386  {
    3387  ptr[i].~T();
    3388  }
    3389  VmaFree(pAllocationCallbacks, ptr);
    3390  }
    3391 }
    3392 
// STL-compatible allocator.
// Routes all allocations through VkAllocationCallbacks (or the system aligned
// allocator when none were given). Used by VmaVector and, optionally, by the
// STL containers when VMA_USE_STL_CONTAINERS is enabled.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators compare equal iff they use the same callbacks, so memory
    // allocated by one may be freed by the other.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3420 
    3421 #if VMA_USE_STL_VECTOR
    3422 
    3423 #define VmaVector std::vector
    3424 
    3425 template<typename T, typename allocatorT>
    3426 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3427 {
    3428  vec.insert(vec.begin() + index, item);
    3429 }
    3430 
    3431 template<typename T, typename allocatorT>
    3432 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3433 {
    3434  vec.erase(vec.begin() + index);
    3435 }
    3436 
    3437 #else // #if VMA_USE_STL_VECTOR
    3438 
    3439 /* Class with interface compatible with subset of std::vector.
    3440 T must be POD because constructors and destructors are not called and memcpy is
    3441 used for these objects. */
    3442 template<typename T, typename AllocatorT>
    3443 class VmaVector
    3444 {
    3445 public:
    3446  typedef T value_type;
    3447 
    3448  VmaVector(const AllocatorT& allocator) :
    3449  m_Allocator(allocator),
    3450  m_pArray(VMA_NULL),
    3451  m_Count(0),
    3452  m_Capacity(0)
    3453  {
    3454  }
    3455 
    3456  VmaVector(size_t count, const AllocatorT& allocator) :
    3457  m_Allocator(allocator),
    3458  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3459  m_Count(count),
    3460  m_Capacity(count)
    3461  {
    3462  }
    3463 
    3464  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3465  m_Allocator(src.m_Allocator),
    3466  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3467  m_Count(src.m_Count),
    3468  m_Capacity(src.m_Count)
    3469  {
    3470  if(m_Count != 0)
    3471  {
    3472  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3473  }
    3474  }
    3475 
    3476  ~VmaVector()
    3477  {
    3478  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3479  }
    3480 
    3481  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3482  {
    3483  if(&rhs != this)
    3484  {
    3485  resize(rhs.m_Count);
    3486  if(m_Count != 0)
    3487  {
    3488  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3489  }
    3490  }
    3491  return *this;
    3492  }
    3493 
    3494  bool empty() const { return m_Count == 0; }
    3495  size_t size() const { return m_Count; }
    3496  T* data() { return m_pArray; }
    3497  const T* data() const { return m_pArray; }
    3498 
    3499  T& operator[](size_t index)
    3500  {
    3501  VMA_HEAVY_ASSERT(index < m_Count);
    3502  return m_pArray[index];
    3503  }
    3504  const T& operator[](size_t index) const
    3505  {
    3506  VMA_HEAVY_ASSERT(index < m_Count);
    3507  return m_pArray[index];
    3508  }
    3509 
    3510  T& front()
    3511  {
    3512  VMA_HEAVY_ASSERT(m_Count > 0);
    3513  return m_pArray[0];
    3514  }
    3515  const T& front() const
    3516  {
    3517  VMA_HEAVY_ASSERT(m_Count > 0);
    3518  return m_pArray[0];
    3519  }
    3520  T& back()
    3521  {
    3522  VMA_HEAVY_ASSERT(m_Count > 0);
    3523  return m_pArray[m_Count - 1];
    3524  }
    3525  const T& back() const
    3526  {
    3527  VMA_HEAVY_ASSERT(m_Count > 0);
    3528  return m_pArray[m_Count - 1];
    3529  }
    3530 
    3531  void reserve(size_t newCapacity, bool freeMemory = false)
    3532  {
    3533  newCapacity = VMA_MAX(newCapacity, m_Count);
    3534 
    3535  if((newCapacity < m_Capacity) && !freeMemory)
    3536  {
    3537  newCapacity = m_Capacity;
    3538  }
    3539 
    3540  if(newCapacity != m_Capacity)
    3541  {
    3542  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3543  if(m_Count != 0)
    3544  {
    3545  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3546  }
    3547  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3548  m_Capacity = newCapacity;
    3549  m_pArray = newArray;
    3550  }
    3551  }
    3552 
    3553  void resize(size_t newCount, bool freeMemory = false)
    3554  {
    3555  size_t newCapacity = m_Capacity;
    3556  if(newCount > m_Capacity)
    3557  {
    3558  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3559  }
    3560  else if(freeMemory)
    3561  {
    3562  newCapacity = newCount;
    3563  }
    3564 
    3565  if(newCapacity != m_Capacity)
    3566  {
    3567  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3568  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3569  if(elementsToCopy != 0)
    3570  {
    3571  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3572  }
    3573  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3574  m_Capacity = newCapacity;
    3575  m_pArray = newArray;
    3576  }
    3577 
    3578  m_Count = newCount;
    3579  }
    3580 
    3581  void clear(bool freeMemory = false)
    3582  {
    3583  resize(0, freeMemory);
    3584  }
    3585 
    3586  void insert(size_t index, const T& src)
    3587  {
    3588  VMA_HEAVY_ASSERT(index <= m_Count);
    3589  const size_t oldCount = size();
    3590  resize(oldCount + 1);
    3591  if(index < oldCount)
    3592  {
    3593  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3594  }
    3595  m_pArray[index] = src;
    3596  }
    3597 
    3598  void remove(size_t index)
    3599  {
    3600  VMA_HEAVY_ASSERT(index < m_Count);
    3601  const size_t oldCount = size();
    3602  if(index < oldCount - 1)
    3603  {
    3604  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3605  }
    3606  resize(oldCount - 1);
    3607  }
    3608 
    3609  void push_back(const T& src)
    3610  {
    3611  const size_t newIndex = size();
    3612  resize(newIndex + 1);
    3613  m_pArray[newIndex] = src;
    3614  }
    3615 
    3616  void pop_back()
    3617  {
    3618  VMA_HEAVY_ASSERT(m_Count > 0);
    3619  resize(size() - 1);
    3620  }
    3621 
    3622  void push_front(const T& src)
    3623  {
    3624  insert(0, src);
    3625  }
    3626 
    3627  void pop_front()
    3628  {
    3629  VMA_HEAVY_ASSERT(m_Count > 0);
    3630  remove(0);
    3631  }
    3632 
    3633  typedef T* iterator;
    3634 
    3635  iterator begin() { return m_pArray; }
    3636  iterator end() { return m_pArray + m_Count; }
    3637 
    3638 private:
    3639  AllocatorT m_Allocator;
    3640  T* m_pArray;
    3641  size_t m_Count;
    3642  size_t m_Capacity;
    3643 };
    3644 
// VmaVector overload: forwards to the custom vector's own insert().
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3650 
// VmaVector overload: forwards to the custom vector's own remove().
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3656 
    3657 #endif // #if VMA_USE_STL_VECTOR
    3658 
    3659 template<typename CmpLess, typename VectorT>
    3660 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3661 {
    3662  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3663  vector.data(),
    3664  vector.data() + vector.size(),
    3665  value,
    3666  CmpLess()) - vector.data();
    3667  VmaVectorInsert(vector, indexToInsert, value);
    3668  return indexToInsert;
    3669 }
    3670 
    3671 template<typename CmpLess, typename VectorT>
    3672 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3673 {
    3674  CmpLess comparator;
    3675  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3676  vector.begin(),
    3677  vector.end(),
    3678  value,
    3679  comparator);
    3680  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3681  {
    3682  size_t indexToRemove = it - vector.begin();
    3683  VmaVectorRemove(vector, indexToRemove);
    3684  return true;
    3685  }
    3686  return false;
    3687 }
    3688 
    3689 template<typename CmpLess, typename IterT, typename KeyT>
    3690 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3691 {
    3692  CmpLess comparator;
    3693  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3694  beg, end, value, comparator);
    3695  if(it == end ||
    3696  (!comparator(*it, value) && !comparator(value, *it)))
    3697  {
    3698  return it;
    3699  }
    3700  return end;
    3701 }
    3702 
    3704 // class VmaPoolAllocator
    3705 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    // itemsPerBlock: number of item slots in each internally allocated array.
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();      // Frees all blocks; any outstanding pointers become invalid.
    T* Alloc();        // Returns storage for one T. The constructor is NOT called.
    void Free(T* ptr); // Returns an item to the free list of its owning block.

private:
    // A slot holds either a live T or, while free, the index of the next free
    // slot in the same block (an intrusive singly-linked free list).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block has no free slot.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3741 
// Stores the callbacks and block size. No block is allocated here; blocks are
// created lazily by Alloc() via CreateNewBlock().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3750 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Releases every block still owned by this allocator.
    Clear();
}
    3756 
    3757 template<typename T>
    3758 void VmaPoolAllocator<T>::Clear()
    3759 {
    3760  for(size_t i = m_ItemBlocks.size(); i--; )
    3761  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3762  m_ItemBlocks.clear();
    3763 }
    3764 
    3765 template<typename T>
    3766 T* VmaPoolAllocator<T>::Alloc()
    3767 {
    3768  for(size_t i = m_ItemBlocks.size(); i--; )
    3769  {
    3770  ItemBlock& block = m_ItemBlocks[i];
    3771  // This block has some free items: Use first one.
    3772  if(block.FirstFreeIndex != UINT32_MAX)
    3773  {
    3774  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3775  block.FirstFreeIndex = pItem->NextFreeIndex;
    3776  return &pItem->Value;
    3777  }
    3778  }
    3779 
    3780  // No block has free item: Create new one and use it.
    3781  ItemBlock& newBlock = CreateNewBlock();
    3782  Item* const pItem = &newBlock.pItems[0];
    3783  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3784  return &pItem->Value;
    3785 }
    3786 
    3787 template<typename T>
    3788 void VmaPoolAllocator<T>::Free(T* ptr)
    3789 {
    3790  // Search all memory blocks to find ptr.
    3791  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3792  {
    3793  ItemBlock& block = m_ItemBlocks[i];
    3794 
    3795  // Casting to union.
    3796  Item* pItemPtr;
    3797  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3798 
    3799  // Check if pItemPtr is in address range of this block.
    3800  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3801  {
    3802  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3803  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3804  block.FirstFreeIndex = index;
    3805  return;
    3806  }
    3807  }
    3808  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3809 }
    3810 
    3811 template<typename T>
    3812 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3813 {
    3814  ItemBlock newBlock = {
    3815  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3816 
    3817  m_ItemBlocks.push_back(newBlock);
    3818 
    3819  // Setup singly-linked list of all free items in this block.
    3820  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3821  newBlock.pItems[i].NextFreeIndex = i + 1;
    3822  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3823  return m_ItemBlocks.back();
    3824 }
    3825 
    3827 // class VmaRawList, VmaList
    3828 
    3829 #if VMA_USE_STL_LIST
    3830 
    3831 #define VmaList std::list
    3832 
    3833 #else // #if VMA_USE_STL_LIST
    3834 
// Node of VmaRawList: doubly linked, stores the payload by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    3842 
// Doubly linked list.
// Node storage comes from an internal VmaPoolAllocator (chunks of 128 nodes),
// so individual pushes/pops avoid per-node heap allocation.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear(); // Removes all items.

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push/Insert overloads without a value return the new node with Value
    // left in whatever state the pooled slot had - caller assigns it.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Source of all nodes.
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    3887 
// Constructs an empty list. Nodes will be pool-allocated in chunks of 128.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3897 
// The pool allocator's own destructor releases all node storage wholesale.
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
    3904 
    3905 template<typename T>
    3906 void VmaRawList<T>::Clear()
    3907 {
    3908  if(IsEmpty() == false)
    3909  {
    3910  ItemType* pItem = m_pBack;
    3911  while(pItem != VMA_NULL)
    3912  {
    3913  ItemType* const pPrevItem = pItem->pPrev;
    3914  m_ItemAllocator.Free(pItem);
    3915  pItem = pPrevItem;
    3916  }
    3917  m_pFront = VMA_NULL;
    3918  m_pBack = VMA_NULL;
    3919  m_Count = 0;
    3920  }
    3921 }
    3922 
    3923 template<typename T>
    3924 VmaListItem<T>* VmaRawList<T>::PushBack()
    3925 {
    3926  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3927  pNewItem->pNext = VMA_NULL;
    3928  if(IsEmpty())
    3929  {
    3930  pNewItem->pPrev = VMA_NULL;
    3931  m_pFront = pNewItem;
    3932  m_pBack = pNewItem;
    3933  m_Count = 1;
    3934  }
    3935  else
    3936  {
    3937  pNewItem->pPrev = m_pBack;
    3938  m_pBack->pNext = pNewItem;
    3939  m_pBack = pNewItem;
    3940  ++m_Count;
    3941  }
    3942  return pNewItem;
    3943 }
    3944 
    3945 template<typename T>
    3946 VmaListItem<T>* VmaRawList<T>::PushFront()
    3947 {
    3948  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3949  pNewItem->pPrev = VMA_NULL;
    3950  if(IsEmpty())
    3951  {
    3952  pNewItem->pNext = VMA_NULL;
    3953  m_pFront = pNewItem;
    3954  m_pBack = pNewItem;
    3955  m_Count = 1;
    3956  }
    3957  else
    3958  {
    3959  pNewItem->pNext = m_pFront;
    3960  m_pFront->pPrev = pNewItem;
    3961  m_pFront = pNewItem;
    3962  ++m_Count;
    3963  }
    3964  return pNewItem;
    3965 }
    3966 
    3967 template<typename T>
    3968 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    3969 {
    3970  ItemType* const pNewItem = PushBack();
    3971  pNewItem->Value = value;
    3972  return pNewItem;
    3973 }
    3974 
    3975 template<typename T>
    3976 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    3977 {
    3978  ItemType* const pNewItem = PushFront();
    3979  pNewItem->Value = value;
    3980  return pNewItem;
    3981 }
    3982 
    3983 template<typename T>
    3984 void VmaRawList<T>::PopBack()
    3985 {
    3986  VMA_HEAVY_ASSERT(m_Count > 0);
    3987  ItemType* const pBackItem = m_pBack;
    3988  ItemType* const pPrevItem = pBackItem->pPrev;
    3989  if(pPrevItem != VMA_NULL)
    3990  {
    3991  pPrevItem->pNext = VMA_NULL;
    3992  }
    3993  m_pBack = pPrevItem;
    3994  m_ItemAllocator.Free(pBackItem);
    3995  --m_Count;
    3996 }
    3997 
    3998 template<typename T>
    3999 void VmaRawList<T>::PopFront()
    4000 {
    4001  VMA_HEAVY_ASSERT(m_Count > 0);
    4002  ItemType* const pFrontItem = m_pFront;
    4003  ItemType* const pNextItem = pFrontItem->pNext;
    4004  if(pNextItem != VMA_NULL)
    4005  {
    4006  pNextItem->pPrev = VMA_NULL;
    4007  }
    4008  m_pFront = pNextItem;
    4009  m_ItemAllocator.Free(pFrontItem);
    4010  --m_Count;
    4011 }
    4012 
    4013 template<typename T>
    4014 void VmaRawList<T>::Remove(ItemType* pItem)
    4015 {
    4016  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4017  VMA_HEAVY_ASSERT(m_Count > 0);
    4018 
    4019  if(pItem->pPrev != VMA_NULL)
    4020  {
    4021  pItem->pPrev->pNext = pItem->pNext;
    4022  }
    4023  else
    4024  {
    4025  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4026  m_pFront = pItem->pNext;
    4027  }
    4028 
    4029  if(pItem->pNext != VMA_NULL)
    4030  {
    4031  pItem->pNext->pPrev = pItem->pPrev;
    4032  }
    4033  else
    4034  {
    4035  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4036  m_pBack = pItem->pPrev;
    4037  }
    4038 
    4039  m_ItemAllocator.Free(pItem);
    4040  --m_Count;
    4041 }
    4042 
    4043 template<typename T>
    4044 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4045 {
    4046  if(pItem != VMA_NULL)
    4047  {
    4048  ItemType* const prevItem = pItem->pPrev;
    4049  ItemType* const newItem = m_ItemAllocator.Alloc();
    4050  newItem->pPrev = prevItem;
    4051  newItem->pNext = pItem;
    4052  pItem->pPrev = newItem;
    4053  if(prevItem != VMA_NULL)
    4054  {
    4055  prevItem->pNext = newItem;
    4056  }
    4057  else
    4058  {
    4059  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4060  m_pFront = newItem;
    4061  }
    4062  ++m_Count;
    4063  return newItem;
    4064  }
    4065  else
    4066  return PushBack();
    4067 }
    4068 
    4069 template<typename T>
    4070 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4071 {
    4072  if(pItem != VMA_NULL)
    4073  {
    4074  ItemType* const nextItem = pItem->pNext;
    4075  ItemType* const newItem = m_ItemAllocator.Alloc();
    4076  newItem->pNext = nextItem;
    4077  newItem->pPrev = pItem;
    4078  pItem->pNext = newItem;
    4079  if(nextItem != VMA_NULL)
    4080  {
    4081  nextItem->pPrev = newItem;
    4082  }
    4083  else
    4084  {
    4085  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4086  m_pBack = newItem;
    4087  }
    4088  ++m_Count;
    4089  return newItem;
    4090  }
    4091  else
    4092  return PushFront();
    4093 }
    4094 
    4095 template<typename T>
    4096 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4097 {
    4098  ItemType* const newItem = InsertBefore(pItem);
    4099  newItem->Value = value;
    4100  return newItem;
    4101 }
    4102 
    4103 template<typename T>
    4104 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4105 {
    4106  ItemType* const newItem = InsertAfter(pItem);
    4107  newItem->Value = value;
    4108  return newItem;
    4109 }
    4110 
// std::list-like wrapper over VmaRawList providing bidirectional iterators.
// Only the subset of the std::list interface used by this library is exposed.
// An iterator with null m_pItem represents end(); decrementing end() yields
// the last element.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparing iterators from different lists is a program error.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly constructible from it.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    // AllocatorT is expected to expose m_pCallbacks (VmaStlAllocator-style).
    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before it, like std::list::insert.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4295 
    4296 #endif // #if VMA_USE_STL_LIST
    4297 
    4299 // class VmaMap
    4300 
    4301 // Unused in this version.
    4302 #if 0
    4303 
    4304 #if VMA_USE_STL_UNORDERED_MAP
    4305 
    4306 #define VmaPair std::pair
    4307 
    4308 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4309  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4310 
    4311 #else // #if VMA_USE_STL_UNORDERED_MAP
    4312 
// Minimal std::pair replacement for VmaMap. (Currently compiled out - #if 0.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4322 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
// Backed by a VmaVector of pairs kept sorted by key: insert/find/erase
// (defined below) use binary search. Currently compiled out (#if 0).
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Iterators are raw pointers into the vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4345 
    4346 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4347 
// Orders VmaPair-s (and, heterogeneously, bare keys) by pair.first, for use
// with the binary-search helpers over a key-sorted vector.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4360 
// Inserts pair at the position that keeps m_Vector sorted by key.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    // Binary-search for the first element whose key is not less than pair's.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4371 
// Returns an iterator to the element with the given key, or end() if absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    // Lower-bound search over the key-sorted vector.
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    // The found position may hold a larger key - verify exact match.
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4389 
// Removes the element pointed to by it; remaining elements stay key-sorted.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4395 
    4396 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4397 
    4398 #endif // #if 0
    4399 
    4401 
class VmaDeviceMemoryBlock;

// Direction of a cache-maintenance operation on mapped memory.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4405 
/*
Internal state behind a VmaAllocation handle. Depending on m_Type it is either
a sub-allocation inside a VmaDeviceMemoryBlock (BlockAllocation) or a whole
dedicated VkDeviceMemory (DedicatedAllocation); the anonymous union at the
bottom stores the data of whichever kind is active.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit of m_MapCount set when the allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01, // m_pUserData is an owned string copy.
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,       // Constructed but no Init* called yet.
        ALLOCATION_TYPE_BLOCK,      // Sub-allocation of a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED,  // Owns its own VkDeviceMemory.
    };

    // userDataString: when true, SetUserData is expected to treat the pointer
    // as a string to copy (see FLAG_USER_DATA_STRING).
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Persistent-map bit is allowed to remain; explicit map refcount must be 0.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns this object into a block sub-allocation. Must be called at most
    // once, on a freshly constructed object (asserted).
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes the object directly in the "lost" state: a block-type
    // allocation with null block. Requires m_LastUseFrameIndex to already be
    // VMA_FRAME_INDEX_LOST (asserted).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves a block allocation to another block/offset (e.g. defragmentation).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block allocations (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Weak CAS on the last-use frame index; used for lost-allocation tracking.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were a one-allocation
    // "block" with no unused space.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap helpers specialized per allocation type.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be set only once (asserted); 0 means unknown usage.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4622 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;       // Start of the region within the block.
    VkDeviceSize size;         // Size of the region in bytes.
    VmaAllocation hAllocation; // Owning allocation; presumably null for free regions - confirm at use sites.
    VmaSuballocationType type; // VMA_SUBALLOCATION_TYPE_* (free vs. kind of resource).
};
    4634 
// Comparator for offsets.
// Strict weak ordering by ascending offset (for containers sorted low-to-high).
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Strict weak ordering by descending offset (for containers sorted high-to-low).
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    4650 
// Suballocations of one block, kept in a pool-backed doubly linked list.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4655 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Opaque; meaning depends on the metadata implementation.

    // Total cost of accepting this request: bytes of allocations to destroy
    // plus a fixed per-lost-allocation penalty.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4683 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.
*/
// Abstract base class; concrete strategies (e.g. generic/linear) derive from it.
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Called once after construction with the managed block's size.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for derived classes' PrintDetailedMap implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4769 
// Helper for Validate() implementations: if 'cond' does not hold, asserts with
// the stringized condition and makes the enclosing function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4774 
/*
Default metadata algorithm: keeps all suballocations (both taken and free) in
m_Suballocations, with free ones above a size threshold additionally indexed in
m_FreeSuballocationsBySize for size-based searches.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Total number of items minus the free ones = number of actual allocations.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    // Number of free items in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all free suballocations.
    VkDeviceSize m_SumFreeSize;
    // All suballocations, taken and free alike.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    // Validates the m_FreeSuballocationsBySize bookkeeping; returns false on failure.
    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4865 
    4866 /*
    4867 Allocations and their references in internal data structure look like this:
    4868 
    4869 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4870 
    4871  0 +-------+
    4872  | |
    4873  | |
    4874  | |
    4875  +-------+
    4876  | Alloc | 1st[m_1stNullItemsBeginCount]
    4877  +-------+
    4878  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4879  +-------+
    4880  | ... |
    4881  +-------+
    4882  | Alloc | 1st[1st.size() - 1]
    4883  +-------+
    4884  | |
    4885  | |
    4886  | |
    4887 GetSize() +-------+
    4888 
    4889 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4890 
    4891  0 +-------+
    4892  | Alloc | 2nd[0]
    4893  +-------+
    4894  | Alloc | 2nd[1]
    4895  +-------+
    4896  | ... |
    4897  +-------+
    4898  | Alloc | 2nd[2nd.size() - 1]
    4899  +-------+
    4900  | |
    4901  | |
    4902  | |
    4903  +-------+
    4904  | Alloc | 1st[m_1stNullItemsBeginCount]
    4905  +-------+
    4906  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4907  +-------+
    4908  | ... |
    4909  +-------+
    4910  | Alloc | 1st[1st.size() - 1]
    4911  +-------+
    4912  | |
    4913 GetSize() +-------+
    4914 
    4915 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4916 
    4917  0 +-------+
    4918  | |
    4919  | |
    4920  | |
    4921  +-------+
    4922  | Alloc | 1st[m_1stNullItemsBeginCount]
    4923  +-------+
    4924  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4925  +-------+
    4926  | ... |
    4927  +-------+
    4928  | Alloc | 1st[1st.size() - 1]
    4929  +-------+
    4930  | |
    4931  | |
    4932  | |
    4933  +-------+
    4934  | Alloc | 2nd[2nd.size() - 1]
    4935  +-------+
    4936  | ... |
    4937  +-------+
    4938  | Alloc | 2nd[1]
    4939  +-------+
    4940  | Alloc | 2nd[0]
    4941 GetSize() +-------+
    4942 
    4943 */
/*
Linear metadata algorithm: keeps suballocations in two vectors used in a
ping-pong fashion, which enables ring-buffer and double-stack behavior.
See the diagrams in the comment above and SECOND_VECTOR_MODE below.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    // Sum of sizes of all free regions in the block.
    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    // Selects which of m_Suballocations0/1 currently plays the role of 1st.
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Heuristic deciding whether 1st vector has accumulated enough null items to be compacted.
    bool ShouldCompact1st() const;
    // Housekeeping after a Free*() - presumably drops null items and compacts/swaps
    // the vectors; confirm in the implementation.
    void CleanupAfterFree();
};
    5042 
    5043 /*
    5044 - GetSize() is the original size of allocated memory block.
    5045 - m_UsableSize is this size aligned down to a power of two.
    5046  All allocations and calculations happen relative to m_UsableSize.
    5047 - GetUnusableSize() is the difference between them.
    5048  It is reported as a separate, unused range, not available for allocations.
    5049 
    5050 Node at level 0 has size = m_UsableSize.
    5051 Each next level contains nodes with size 2 times smaller than current level.
    5052 m_LevelCount is the maximum number of levels to use in the current object.
    5053 */
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // The unusable tail beyond m_UsableSize is counted as free/unused space.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root node still covers the whole usable size as one free node.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    // Smallest node size in bytes - the allocation granularity of this algorithm.
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    // Upper bound on tree depth; level 0 is the single node of size m_UsableSize.
    static const size_t MAX_LEVELS = 30;

    // Counters recomputed by walking the tree, presumably compared by Validate()
    // against the cached m_AllocationCount/m_FreeCount/m_SumFreeSize.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    /*
    Node of the buddy tree. Which union member is active depends on 'type':
    TYPE_FREE -> free, TYPE_ALLOCATION -> allocation, TYPE_SPLIT -> split.
    */
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        // Sibling node of the same size - presumably split.leftChild->buddy is the right child.
        Node* buddy;

        union
        {
            // Links in the intrusive free list of this node's level (see m_FreeList).
            struct
            {
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked list of free nodes, one list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    // Difference between the block's real size and its power-of-two usable size.
    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    // Presumably maps a requested allocation size to the deepest level whose node size still fits it.
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves with each level down from m_UsableSize.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5190 
    5191 /*
    5192 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5193 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5194 
    5195 Thread-safety: This class must be externally synchronized.
    5196 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations within this block; concrete type depends on the chosen algorithm.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory, presumably adding 'count' references to m_MapCount.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/check corruption-detection markers around [allocOffset, allocOffset + allocSize).
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    // Binds the buffer/image to this block's memory (m_Mutex guards such use of m_hMemory - see below).
    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    // Reference counter of mappings; the destructor asserts it is back to 0.
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5259 
    5260 struct VmaPointerLess
    5261 {
    5262  bool operator()(const void* lhs, const void* rhs) const
    5263  {
    5264  return lhs < rhs;
    5265  }
    5266 };
    5267 
    5268 class VmaDefragmentator;
    5269 
    5270 /*
    5271 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5272 Vulkan memory type.
    5273 
    5274 Synchronized internally with a mutex.
    5275 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Preallocates blocks up to m_MinBlockCount.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Main allocation entry point for this memory type.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Returns the existing defragmentator or creates one lazily.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one block that is completely empty - a hysteresis
    to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;
    // Source of unique ids handed to newly created blocks.
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    // Allocates a new VkDeviceMemory block and appends it to m_Blocks.
    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5388 
// Internal representation of a custom memory pool (VmaPool handle).
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    // The block vector holding all memory of this pool.
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // May be called only once: asserts that the id is still the initial 0.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5411 
// Moves registered allocations between blocks of a single VmaBlockVector to
// reduce fragmentation. Usage: AddAllocation() for each candidate, then Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    // The block vector being defragmented.
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Output statistics, exposed via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation via AddAllocation().
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        // Optional out-flag - presumably set when the allocation was actually
        // moved; confirm in the Defragment() implementation.
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by allocation size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state gathered during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        // True when the block also holds allocations that were NOT registered here.
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // Non-movable allocations exist when the block contains more allocations
        // than were registered in m_Allocations.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): "Descecnding" is a typo in the original method name;
        // kept as-is because renaming would break callers elsewhere in the file.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        // Makes sure the block is mapped and returns the pointer in *ppMappedData.
        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo* by the address of the underlying block; the mixed
    // overload allows searching against a raw VmaDeviceMemoryBlock*.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moving allocations, respecting the given limits.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    // Heuristic: decides whether moving an allocation from src to dst position is beneficial.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    // Statistics of the work done so far.
    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged is optional.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    // Runs the defragmentation, limited by the given budget of bytes/allocations to move.
    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5541 
    5542 #if VMA_RECORDING_ENABLED
    5543 
/*
Records every allocator call to a CSV-like trace file so a sequence of
allocations can be replayed and analyzed offline (VmaReplay).
Active only when VMA_RECORDING_ENABLED. All Record* methods append one line
to the file; writes are serialized with m_FileMutex when m_UseMutex is set.
*/
class VmaRecorder
{
public:
    VmaRecorder();
    // Opens the output file given by settings. Returns error if it cannot be opened.
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes the header section describing the physical device the trace was captured on.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per public allocator entry point; each logs the
    // frame index, call parameters, and the handle involved.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Common per-call data written at the start of every trace line.
    struct CallParams
    {
        uint32_t threadId;
        double time; // Seconds since recording started.
    };

    // Helper that renders pUserData either as the string itself or as a
    // formatted pointer, depending on allocation flags.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17]; // Buffer for "%p"-style rendering: 16 hex digits + NUL.
        const char* m_Str; // Points either into m_PtrStr or at the user's string.
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;    // Guards m_File when m_UseMutex.
    int64_t m_Freq;           // Timer frequency, for converting counters to seconds.
    int64_t m_StartCounter;   // Timer value at Init, origin for CallParams::time.

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5639 
    5640 #endif // #if VMA_RECORDING_ENABLED
    5641 
    5642 // Main allocator object.
// Main allocator object. This is what the opaque handle VmaAllocator points to.
// Owns the default block vectors (one per Vulkan memory type), the lists of
// dedicated allocations, and all custom pools created by the user.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;                      // False when created single-threaded (externally synchronized).
    bool m_UseKhrDedicatedAllocation;     // VK_KHR_dedicated_allocation extension enabled.
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;  // True if the user supplied CPU allocation callbacks.
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools, indexed by memory type.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    // Second-phase initialization; may fail (e.g. recording file cannot be opened).
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Granularity to respect between linear and optimal resources in one block,
    // never smaller than the debug override.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types additionally align to nonCoherentAtomSize so that
    // flush/invalidate ranges never overlap neighboring allocations.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Thin wrappers over vkAllocateMemory/vkFreeMemory that also honor heap
    // size limits and invoke the user's device-memory callbacks.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills mapped allocation memory with a byte pattern (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Frees a dedicated allocation: unregisters it and releases its VkDeviceMemory.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5839 
    5841 // Memory allocation #2 after VmaAllocator_T definition
    5842 
    5843 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5844 {
    5845  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5846 }
    5847 
    5848 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5849 {
    5850  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5851 }
    5852 
    5853 template<typename T>
    5854 static T* VmaAllocate(VmaAllocator hAllocator)
    5855 {
    5856  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5857 }
    5858 
    5859 template<typename T>
    5860 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5861 {
    5862  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5863 }
    5864 
    5865 template<typename T>
    5866 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5867 {
    5868  if(ptr != VMA_NULL)
    5869  {
    5870  ptr->~T();
    5871  VmaFree(hAllocator, ptr);
    5872  }
    5873 }
    5874 
    5875 template<typename T>
    5876 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5877 {
    5878  if(ptr != VMA_NULL)
    5879  {
    5880  for(size_t i = count; i--; )
    5881  ptr[i].~T();
    5882  VmaFree(hAllocator, ptr);
    5883  }
    5884 }
    5885 
    5887 // VmaStringBuilder
    5888 
    5889 #if VMA_STATS_STRING_ENABLED
    5890 
// Minimal append-only text buffer used to build the stats JSON string.
// Backed by a VmaVector<char> so growth uses the allocator's CPU callbacks.
// NOTE: GetData() is NOT null-terminated; pair it with GetLength().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);            // Appends a null-terminated string (without the terminator).
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);          // Decimal.
    void AddNumber(uint64_t num);          // Decimal.
    void AddPointer(const void* ptr);      // Formatted by VmaPtrToStr.

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5908 
    5909 void VmaStringBuilder::Add(const char* pStr)
    5910 {
    5911  const size_t strLen = strlen(pStr);
    5912  if(strLen > 0)
    5913  {
    5914  const size_t oldCount = m_Data.size();
    5915  m_Data.resize(oldCount + strLen);
    5916  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5917  }
    5918 }
    5919 
    5920 void VmaStringBuilder::AddNumber(uint32_t num)
    5921 {
    5922  char buf[11];
    5923  VmaUint32ToStr(buf, sizeof(buf), num);
    5924  Add(buf);
    5925 }
    5926 
    5927 void VmaStringBuilder::AddNumber(uint64_t num)
    5928 {
    5929  char buf[21];
    5930  VmaUint64ToStr(buf, sizeof(buf), num);
    5931  Add(buf);
    5932 }
    5933 
    5934 void VmaStringBuilder::AddPointer(const void* ptr)
    5935 {
    5936  char buf[21];
    5937  VmaPtrToStr(buf, sizeof(buf), ptr);
    5938  Add(buf);
    5939 }
    5940 
    5941 #endif // #if VMA_STATS_STRING_ENABLED
    5942 
    5944 // VmaJsonWriter
    5945 
    5946 #if VMA_STATS_STRING_ENABLED
    5947 
/*
Streaming JSON writer used to serialize allocator statistics into a
VmaStringBuilder. Maintains a stack of open collections so it can insert
commas, colons, and indentation automatically. Usage contract (checked via
VMA_ASSERT): inside an object, values alternate key (string) / value; a
string opened with BeginString must be closed with EndString before any
other value is written.
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine == true suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value (or object key) with escaping.
    void WriteString(const char* pStr);
    // Piecewise string emission: Begin, any number of Continue*, then End.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT; // String emitted once per nesting level.

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently-open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;   // Values written so far; parity decides key vs. value in objects.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;       // True between BeginString and EndString.

    // Emits separators/indentation before a new value; asserts key/value discipline.
    void BeginValue(bool isString);
    // Newline + one INDENT per stack level (oneLess: for closing brackets).
    void WriteIndent(bool oneLess = false);
};
    5996 
    5997 const char* const VmaJsonWriter::INDENT = " ";
    5998 
// Starts with an empty collection stack and outside of any string.
// The callbacks are only used by the stack vector's allocator.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6005 
// On destruction the document must be complete: no string left open and
// every BeginObject/BeginArray matched by its End counterpart.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6011 
    6012 void VmaJsonWriter::BeginObject(bool singleLine)
    6013 {
    6014  VMA_ASSERT(!m_InsideString);
    6015 
    6016  BeginValue(false);
    6017  m_SB.Add('{');
    6018 
    6019  StackItem item;
    6020  item.type = COLLECTION_TYPE_OBJECT;
    6021  item.valueCount = 0;
    6022  item.singleLineMode = singleLine;
    6023  m_Stack.push_back(item);
    6024 }
    6025 
    6026 void VmaJsonWriter::EndObject()
    6027 {
    6028  VMA_ASSERT(!m_InsideString);
    6029 
    6030  WriteIndent(true);
    6031  m_SB.Add('}');
    6032 
    6033  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6034  m_Stack.pop_back();
    6035 }
    6036 
    6037 void VmaJsonWriter::BeginArray(bool singleLine)
    6038 {
    6039  VMA_ASSERT(!m_InsideString);
    6040 
    6041  BeginValue(false);
    6042  m_SB.Add('[');
    6043 
    6044  StackItem item;
    6045  item.type = COLLECTION_TYPE_ARRAY;
    6046  item.valueCount = 0;
    6047  item.singleLineMode = singleLine;
    6048  m_Stack.push_back(item);
    6049 }
    6050 
    6051 void VmaJsonWriter::EndArray()
    6052 {
    6053  VMA_ASSERT(!m_InsideString);
    6054 
    6055  WriteIndent(true);
    6056  m_SB.Add(']');
    6057 
    6058  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6059  m_Stack.pop_back();
    6060 }
    6061 
    6062 void VmaJsonWriter::WriteString(const char* pStr)
    6063 {
    6064  BeginString(pStr);
    6065  EndString();
    6066 }
    6067 
    6068 void VmaJsonWriter::BeginString(const char* pStr)
    6069 {
    6070  VMA_ASSERT(!m_InsideString);
    6071 
    6072  BeginValue(true);
    6073  m_SB.Add('"');
    6074  m_InsideString = true;
    6075  if(pStr != VMA_NULL && pStr[0] != '\0')
    6076  {
    6077  ContinueString(pStr);
    6078  }
    6079 }
    6080 
    6081 void VmaJsonWriter::ContinueString(const char* pStr)
    6082 {
    6083  VMA_ASSERT(m_InsideString);
    6084 
    6085  const size_t strLen = strlen(pStr);
    6086  for(size_t i = 0; i < strLen; ++i)
    6087  {
    6088  char ch = pStr[i];
    6089  if(ch == '\\')
    6090  {
    6091  m_SB.Add("\\\\");
    6092  }
    6093  else if(ch == '"')
    6094  {
    6095  m_SB.Add("\\\"");
    6096  }
    6097  else if(ch >= 32)
    6098  {
    6099  m_SB.Add(ch);
    6100  }
    6101  else switch(ch)
    6102  {
    6103  case '\b':
    6104  m_SB.Add("\\b");
    6105  break;
    6106  case '\f':
    6107  m_SB.Add("\\f");
    6108  break;
    6109  case '\n':
    6110  m_SB.Add("\\n");
    6111  break;
    6112  case '\r':
    6113  m_SB.Add("\\r");
    6114  break;
    6115  case '\t':
    6116  m_SB.Add("\\t");
    6117  break;
    6118  default:
    6119  VMA_ASSERT(0 && "Character not currently supported.");
    6120  break;
    6121  }
    6122  }
    6123 }
    6124 
    6125 void VmaJsonWriter::ContinueString(uint32_t n)
    6126 {
    6127  VMA_ASSERT(m_InsideString);
    6128  m_SB.AddNumber(n);
    6129 }
    6130 
    6131 void VmaJsonWriter::ContinueString(uint64_t n)
    6132 {
    6133  VMA_ASSERT(m_InsideString);
    6134  m_SB.AddNumber(n);
    6135 }
    6136 
    6137 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    6138 {
    6139  VMA_ASSERT(m_InsideString);
    6140  m_SB.AddPointer(ptr);
    6141 }
    6142 
    6143 void VmaJsonWriter::EndString(const char* pStr)
    6144 {
    6145  VMA_ASSERT(m_InsideString);
    6146  if(pStr != VMA_NULL && pStr[0] != '\0')
    6147  {
    6148  ContinueString(pStr);
    6149  }
    6150  m_SB.Add('"');
    6151  m_InsideString = false;
    6152 }
    6153 
    6154 void VmaJsonWriter::WriteNumber(uint32_t n)
    6155 {
    6156  VMA_ASSERT(!m_InsideString);
    6157  BeginValue(false);
    6158  m_SB.AddNumber(n);
    6159 }
    6160 
    6161 void VmaJsonWriter::WriteNumber(uint64_t n)
    6162 {
    6163  VMA_ASSERT(!m_InsideString);
    6164  BeginValue(false);
    6165  m_SB.AddNumber(n);
    6166 }
    6167 
    6168 void VmaJsonWriter::WriteBool(bool b)
    6169 {
    6170  VMA_ASSERT(!m_InsideString);
    6171  BeginValue(false);
    6172  m_SB.Add(b ? "true" : "false");
    6173 }
    6174 
    6175 void VmaJsonWriter::WriteNull()
    6176 {
    6177  VMA_ASSERT(!m_InsideString);
    6178  BeginValue(false);
    6179  m_SB.Add("null");
    6180 }
    6181 
    6182 void VmaJsonWriter::BeginValue(bool isString)
    6183 {
    6184  if(!m_Stack.empty())
    6185  {
    6186  StackItem& currItem = m_Stack.back();
    6187  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6188  currItem.valueCount % 2 == 0)
    6189  {
    6190  VMA_ASSERT(isString);
    6191  }
    6192 
    6193  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6194  currItem.valueCount % 2 != 0)
    6195  {
    6196  m_SB.Add(": ");
    6197  }
    6198  else if(currItem.valueCount > 0)
    6199  {
    6200  m_SB.Add(", ");
    6201  WriteIndent();
    6202  }
    6203  else
    6204  {
    6205  WriteIndent();
    6206  }
    6207  ++currItem.valueCount;
    6208  }
    6209 }
    6210 
    6211 void VmaJsonWriter::WriteIndent(bool oneLess)
    6212 {
    6213  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6214  {
    6215  m_SB.AddNewLine();
    6216 
    6217  size_t count = m_Stack.size();
    6218  if(count > 0 && oneLess)
    6219  {
    6220  --count;
    6221  }
    6222  for(size_t i = 0; i < count; ++i)
    6223  {
    6224  m_SB.Add(INDENT);
    6225  }
    6226  }
    6227 }
    6228 
    6229 #endif // #if VMA_STATS_STRING_ENABLED
    6230 
    6232 
    6233 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6234 {
    6235  if(IsUserDataString())
    6236  {
    6237  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6238 
    6239  FreeUserDataString(hAllocator);
    6240 
    6241  if(pUserData != VMA_NULL)
    6242  {
    6243  const char* const newStrSrc = (char*)pUserData;
    6244  const size_t newStrLen = strlen(newStrSrc);
    6245  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6246  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6247  m_pUserData = newStrDst;
    6248  }
    6249  }
    6250  else
    6251  {
    6252  m_pUserData = pUserData;
    6253  }
    6254 }
    6255 
// Re-points this block allocation at a different block/offset (used by the
// defragmentator after moving the data). If the allocation is currently
// mapped, its map reference count is transferred from the old block to the
// new one so both blocks keep a consistent mapping refcount.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount; // Persistent mapping holds one extra reference.
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6277 
    6278 VkDeviceSize VmaAllocation_T::GetOffset() const
    6279 {
    6280  switch(m_Type)
    6281  {
    6282  case ALLOCATION_TYPE_BLOCK:
    6283  return m_BlockAllocation.m_Offset;
    6284  case ALLOCATION_TYPE_DEDICATED:
    6285  return 0;
    6286  default:
    6287  VMA_ASSERT(0);
    6288  return 0;
    6289  }
    6290 }
    6291 
    6292 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6293 {
    6294  switch(m_Type)
    6295  {
    6296  case ALLOCATION_TYPE_BLOCK:
    6297  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6298  case ALLOCATION_TYPE_DEDICATED:
    6299  return m_DedicatedAllocation.m_hMemory;
    6300  default:
    6301  VMA_ASSERT(0);
    6302  return VK_NULL_HANDLE;
    6303  }
    6304 }
    6305 
    6306 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6307 {
    6308  switch(m_Type)
    6309  {
    6310  case ALLOCATION_TYPE_BLOCK:
    6311  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6312  case ALLOCATION_TYPE_DEDICATED:
    6313  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6314  default:
    6315  VMA_ASSERT(0);
    6316  return UINT32_MAX;
    6317  }
    6318 }
    6319 
    6320 void* VmaAllocation_T::GetMappedData() const
    6321 {
    6322  switch(m_Type)
    6323  {
    6324  case ALLOCATION_TYPE_BLOCK:
    6325  if(m_MapCount != 0)
    6326  {
    6327  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6328  VMA_ASSERT(pBlockData != VMA_NULL);
    6329  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6330  }
    6331  else
    6332  {
    6333  return VMA_NULL;
    6334  }
    6335  break;
    6336  case ALLOCATION_TYPE_DEDICATED:
    6337  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6338  return m_DedicatedAllocation.m_pMappedData;
    6339  default:
    6340  VMA_ASSERT(0);
    6341  return VMA_NULL;
    6342  }
    6343 }
    6344 
    6345 bool VmaAllocation_T::CanBecomeLost() const
    6346 {
    6347  switch(m_Type)
    6348  {
    6349  case ALLOCATION_TYPE_BLOCK:
    6350  return m_BlockAllocation.m_CanBecomeLost;
    6351  case ALLOCATION_TYPE_DEDICATED:
    6352  return false;
    6353  default:
    6354  VMA_ASSERT(0);
    6355  return false;
    6356  }
    6357 }
    6358 
    6359 VmaPool VmaAllocation_T::GetPool() const
    6360 {
    6361  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6362  return m_BlockAllocation.m_hPool;
    6363 }
    6364 
// Attempts to mark this allocation as lost so its space can be reused.
// Returns true on success; returns false if the allocation was used too
// recently (within frameInUseCount frames of currentFrameIndex).
// Thread-safe: uses a compare-exchange loop on the last-use frame index.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - callers should not ask twice.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU - cannot be lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            // CAS: on failure localLastUseFrameIndex is reloaded and the loop retries,
            // re-checking the conditions above against the fresh value.
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    6396 
    6397 #if VMA_STATS_STRING_ENABLED
    6398 
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value when printing the detailed map,
// so the order here must match the enum declaration exactly.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6408 
// Writes this allocation's parameters as key/value pairs into an already-open
// JSON object (the caller owns BeginObject/EndObject).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned string copy: printable as-is.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque handle: print the pointer value.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // 0 means usage is unknown/not recorded, so the key is omitted.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6444 
    6445 #endif
    6446 
    6447 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6448 {
    6449  VMA_ASSERT(IsUserDataString());
    6450  if(m_pUserData != VMA_NULL)
    6451  {
    6452  char* const oldStr = (char*)m_pUserData;
    6453  const size_t oldStrLen = strlen(oldStr);
    6454  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6455  m_pUserData = VMA_NULL;
    6456  }
    6457 }
    6458 
    6459 void VmaAllocation_T::BlockAllocMap()
    6460 {
    6461  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6462 
    6463  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6464  {
    6465  ++m_MapCount;
    6466  }
    6467  else
    6468  {
    6469  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6470  }
    6471 }
    6472 
    6473 void VmaAllocation_T::BlockAllocUnmap()
    6474 {
    6475  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6476 
    6477  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6478  {
    6479  --m_MapCount;
    6480  }
    6481  else
    6482  {
    6483  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6484  }
    6485 }
    6486 
    6487 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6488 {
    6489  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6490 
    6491  if(m_MapCount != 0)
    6492  {
    6493  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6494  {
    6495  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6496  *ppData = m_DedicatedAllocation.m_pMappedData;
    6497  ++m_MapCount;
    6498  return VK_SUCCESS;
    6499  }
    6500  else
    6501  {
    6502  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6503  return VK_ERROR_MEMORY_MAP_FAILED;
    6504  }
    6505  }
    6506  else
    6507  {
    6508  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6509  hAllocator->m_hDevice,
    6510  m_DedicatedAllocation.m_hMemory,
    6511  0, // offset
    6512  VK_WHOLE_SIZE,
    6513  0, // flags
    6514  ppData);
    6515  if(result == VK_SUCCESS)
    6516  {
    6517  m_DedicatedAllocation.m_pMappedData = *ppData;
    6518  m_MapCount = 1;
    6519  }
    6520  return result;
    6521  }
    6522 }
    6523 
    6524 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6525 {
    6526  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6527 
    6528  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6529  {
    6530  --m_MapCount;
    6531  if(m_MapCount == 0)
    6532  {
    6533  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6534  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6535  hAllocator->m_hDevice,
    6536  m_DedicatedAllocation.m_hMemory);
    6537  }
    6538  }
    6539  else
    6540  {
    6541  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6542  }
    6543 }
    6544 
    6545 #if VMA_STATS_STRING_ENABLED
    6546 
// Serializes one VmaStatInfo as a JSON object: counts, byte totals, and -
// when there is more than one sample - min/avg/max of allocation and
// unused-range sizes (single-sample stats would be redundant with the totals).
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true); // Compact single-line sub-object.
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true); // Compact single-line sub-object.
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6594 
    6595 #endif // #if VMA_STATS_STRING_ENABLED
    6596 
// Comparator used to keep m_FreeSuballocationsBySize sorted by suballocation
// size ascending. The second overload allows binary search by a plain size
// value (see VmaBinaryFindFirstNotLess).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6612 
    6613 
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata
    6616 
// Base-class constructor. The block size is set later via Init(); the CPU
// allocation callbacks are cached for use by derived metadata classes.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6622 
    6623 #if VMA_STATS_STRING_ENABLED
    6624 
    6625 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    6626  VkDeviceSize unusedBytes,
    6627  size_t allocationCount,
    6628  size_t unusedRangeCount) const
    6629 {
    6630  json.BeginObject();
    6631 
    6632  json.WriteString("TotalBytes");
    6633  json.WriteNumber(GetSize());
    6634 
    6635  json.WriteString("UnusedBytes");
    6636  json.WriteNumber(unusedBytes);
    6637 
    6638  json.WriteString("Allocations");
    6639  json.WriteNumber((uint64_t)allocationCount);
    6640 
    6641  json.WriteString("UnusedRanges");
    6642  json.WriteNumber((uint64_t)unusedRangeCount);
    6643 
    6644  json.WriteString("Suballocations");
    6645  json.BeginArray();
    6646 }
    6647 
    6648 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    6649  VkDeviceSize offset,
    6650  VmaAllocation hAllocation) const
    6651 {
    6652  json.BeginObject(true);
    6653 
    6654  json.WriteString("Offset");
    6655  json.WriteNumber(offset);
    6656 
    6657  hAllocation->PrintParameters(json);
    6658 
    6659  json.EndObject();
    6660 }
    6661 
    6662 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    6663  VkDeviceSize offset,
    6664  VkDeviceSize size) const
    6665 {
    6666  json.BeginObject(true);
    6667 
    6668  json.WriteString("Offset");
    6669  json.WriteNumber(offset);
    6670 
    6671  json.WriteString("Type");
    6672  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    6673 
    6674  json.WriteString("Size");
    6675  json.WriteNumber(size);
    6676 
    6677  json.EndObject();
    6678 }
    6679 
// Closes the "Suballocations" array opened by PrintDetailedMap_Begin(), then
// the enclosing per-block object.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6685 
    6686 #endif // #if VMA_STATS_STRING_ENABLED
    6687 
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Generic
    6690 
// Both containers use the allocator's custom CPU allocation callbacks through
// VmaStlAllocator. Real initialization of counts/sizes happens in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6699 
// Trivial: member containers release their storage via their own destructors.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6703 
    6704 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6705 {
    6706  VmaBlockMetadata::Init(size);
    6707 
    6708  m_FreeCount = 1;
    6709  m_SumFreeSize = size;
    6710 
    6711  VmaSuballocation suballoc = {};
    6712  suballoc.offset = 0;
    6713  suballoc.size = size;
    6714  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6715  suballoc.hAllocation = VK_NULL_HANDLE;
    6716 
    6717  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6718  m_Suballocations.push_back(suballoc);
    6719  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6720  --suballocItem;
    6721  m_FreeSuballocationsBySize.push_back(suballocItem);
    6722 }
    6723 
// Full consistency check of this block's metadata. Returns true when valid;
// VMA_VALIDATE makes this function return false on the first violated invariant.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free ranges have no allocation handle; used ranges must have one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the suballocation record.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6805 
    6806 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6807 {
    6808  if(!m_FreeSuballocationsBySize.empty())
    6809  {
    6810  return m_FreeSuballocationsBySize.back()->size;
    6811  }
    6812  else
    6813  {
    6814  return 0;
    6815  }
    6816 }
    6817 
// The block is empty when its only suballocation is the single free range
// covering the whole block (the state set up by Init()).
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
    6822 
    6823 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6824 {
    6825  outInfo.blockCount = 1;
    6826 
    6827  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6828  outInfo.allocationCount = rangeCount - m_FreeCount;
    6829  outInfo.unusedRangeCount = m_FreeCount;
    6830 
    6831  outInfo.unusedBytes = m_SumFreeSize;
    6832  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6833 
    6834  outInfo.allocationSizeMin = UINT64_MAX;
    6835  outInfo.allocationSizeMax = 0;
    6836  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6837  outInfo.unusedRangeSizeMax = 0;
    6838 
    6839  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6840  suballocItem != m_Suballocations.cend();
    6841  ++suballocItem)
    6842  {
    6843  const VmaSuballocation& suballoc = *suballocItem;
    6844  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6845  {
    6846  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6847  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6848  }
    6849  else
    6850  {
    6851  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6852  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6853  }
    6854  }
    6855 }
    6856 
    6857 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6858 {
    6859  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6860 
    6861  inoutStats.size += GetSize();
    6862  inoutStats.unusedSize += m_SumFreeSize;
    6863  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6864  inoutStats.unusedRangeCount += m_FreeCount;
    6865  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6866 }
    6867 
    6868 #if VMA_STATS_STRING_ENABLED
    6869 
    6870 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6871 {
    6872  PrintDetailedMap_Begin(json,
    6873  m_SumFreeSize, // unusedBytes
    6874  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6875  m_FreeCount); // unusedRangeCount
    6876 
    6877  size_t i = 0;
    6878  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6879  suballocItem != m_Suballocations.cend();
    6880  ++suballocItem, ++i)
    6881  {
    6882  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6883  {
    6884  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6885  }
    6886  else
    6887  {
    6888  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6889  }
    6890  }
    6891 
    6892  PrintDetailedMap_End(json);
    6893 }
    6894 
    6895 #endif // #if VMA_STATS_STRING_ENABLED
    6896 
    6897 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6898  uint32_t currentFrameIndex,
    6899  uint32_t frameInUseCount,
    6900  VkDeviceSize bufferImageGranularity,
    6901  VkDeviceSize allocSize,
    6902  VkDeviceSize allocAlignment,
    6903  bool upperAddress,
    6904  VmaSuballocationType allocType,
    6905  bool canMakeOtherLost,
    6906  uint32_t strategy,
    6907  VmaAllocationRequest* pAllocationRequest)
    6908 {
    6909  VMA_ASSERT(allocSize > 0);
    6910  VMA_ASSERT(!upperAddress);
    6911  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6912  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6913  VMA_HEAVY_ASSERT(Validate());
    6914 
    6915  // There is not enough total free space in this block to fullfill the request: Early return.
    6916  if(canMakeOtherLost == false &&
    6917  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6918  {
    6919  return false;
    6920  }
    6921 
    6922  // New algorithm, efficiently searching freeSuballocationsBySize.
    6923  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6924  if(freeSuballocCount > 0)
    6925  {
    6927  {
    6928  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6929  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6930  m_FreeSuballocationsBySize.data(),
    6931  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6932  allocSize + 2 * VMA_DEBUG_MARGIN,
    6933  VmaSuballocationItemSizeLess());
    6934  size_t index = it - m_FreeSuballocationsBySize.data();
    6935  for(; index < freeSuballocCount; ++index)
    6936  {
    6937  if(CheckAllocation(
    6938  currentFrameIndex,
    6939  frameInUseCount,
    6940  bufferImageGranularity,
    6941  allocSize,
    6942  allocAlignment,
    6943  allocType,
    6944  m_FreeSuballocationsBySize[index],
    6945  false, // canMakeOtherLost
    6946  &pAllocationRequest->offset,
    6947  &pAllocationRequest->itemsToMakeLostCount,
    6948  &pAllocationRequest->sumFreeSize,
    6949  &pAllocationRequest->sumItemSize))
    6950  {
    6951  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6952  return true;
    6953  }
    6954  }
    6955  }
    6956  else // WORST_FIT, FIRST_FIT
    6957  {
    6958  // Search staring from biggest suballocations.
    6959  for(size_t index = freeSuballocCount; index--; )
    6960  {
    6961  if(CheckAllocation(
    6962  currentFrameIndex,
    6963  frameInUseCount,
    6964  bufferImageGranularity,
    6965  allocSize,
    6966  allocAlignment,
    6967  allocType,
    6968  m_FreeSuballocationsBySize[index],
    6969  false, // canMakeOtherLost
    6970  &pAllocationRequest->offset,
    6971  &pAllocationRequest->itemsToMakeLostCount,
    6972  &pAllocationRequest->sumFreeSize,
    6973  &pAllocationRequest->sumItemSize))
    6974  {
    6975  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6976  return true;
    6977  }
    6978  }
    6979  }
    6980  }
    6981 
    6982  if(canMakeOtherLost)
    6983  {
    6984  // Brute-force algorithm. TODO: Come up with something better.
    6985 
    6986  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6987  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6988 
    6989  VmaAllocationRequest tmpAllocRequest = {};
    6990  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    6991  suballocIt != m_Suballocations.end();
    6992  ++suballocIt)
    6993  {
    6994  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    6995  suballocIt->hAllocation->CanBecomeLost())
    6996  {
    6997  if(CheckAllocation(
    6998  currentFrameIndex,
    6999  frameInUseCount,
    7000  bufferImageGranularity,
    7001  allocSize,
    7002  allocAlignment,
    7003  allocType,
    7004  suballocIt,
    7005  canMakeOtherLost,
    7006  &tmpAllocRequest.offset,
    7007  &tmpAllocRequest.itemsToMakeLostCount,
    7008  &tmpAllocRequest.sumFreeSize,
    7009  &tmpAllocRequest.sumItemSize))
    7010  {
    7011  tmpAllocRequest.item = suballocIt;
    7012 
    7013  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7015  {
    7016  *pAllocationRequest = tmpAllocRequest;
    7017  }
    7018  }
    7019  }
    7020  }
    7021 
    7022  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7023  {
    7024  return true;
    7025  }
    7026  }
    7027 
    7028  return false;
    7029 }
    7030 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// starting at pAllocationRequest->item, so the requested allocation can be made
// in their place. Returns false if any of them could not be made lost.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free range to reach the next used suballocation.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // Freeing may merge with neighboring free ranges; continue from the
            // iterator FreeSuballocation returns.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    // On success the request must now point at a free range big enough for the
    // new allocation.
    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7062 
    7063 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7064 {
    7065  uint32_t lostAllocationCount = 0;
    7066  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7067  it != m_Suballocations.end();
    7068  ++it)
    7069  {
    7070  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7071  it->hAllocation->CanBecomeLost() &&
    7072  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7073  {
    7074  it = FreeSuballocation(it);
    7075  ++lostAllocationCount;
    7076  }
    7077  }
    7078  return lostAllocationCount;
    7079 }
    7080 
    7081 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7082 {
    7083  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7084  it != m_Suballocations.end();
    7085  ++it)
    7086  {
    7087  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7088  {
    7089  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7090  {
    7091  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7092  return VK_ERROR_VALIDATION_FAILED_EXT;
    7093  }
    7094  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7095  {
    7096  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7097  return VK_ERROR_VALIDATION_FAILED_EXT;
    7098  }
    7099  }
    7100  }
    7101 
    7102  return VK_SUCCESS;
    7103 }
    7104 
// Commits a previously created allocation request: converts the free
// suballocation pointed to by request.item into a used one of size allocSize,
// splitting off leftover free space before/after it as new free suballocations.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range was consumed; each non-empty padding adds
    // one back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7170 
    7171 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7172 {
    7173  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7174  suballocItem != m_Suballocations.end();
    7175  ++suballocItem)
    7176  {
    7177  VmaSuballocation& suballoc = *suballocItem;
    7178  if(suballoc.hAllocation == allocation)
    7179  {
    7180  FreeSuballocation(suballocItem);
    7181  VMA_HEAVY_ASSERT(Validate());
    7182  return;
    7183  }
    7184  }
    7185  VMA_ASSERT(0 && "Not found!");
    7186 }
    7187 
    7188 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7189 {
    7190  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7191  suballocItem != m_Suballocations.end();
    7192  ++suballocItem)
    7193  {
    7194  VmaSuballocation& suballoc = *suballocItem;
    7195  if(suballoc.offset == offset)
    7196  {
    7197  FreeSuballocation(suballocItem);
    7198  return;
    7199  }
    7200  }
    7201  VMA_ASSERT(0 && "Not found!");
    7202 }
    7203 
    7204 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7205 {
    7206  VkDeviceSize lastSize = 0;
    7207  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7208  {
    7209  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7210 
    7211  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7212  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7213  VMA_VALIDATE(it->size >= lastSize);
    7214  lastSize = it->size;
    7215  }
    7216  return true;
    7217 }
    7218 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at suballocItem. On success fills *pOffset with the final
// aligned offset and returns true. When canMakeOtherLost is true, also counts
// the allocations that would have to be made lost (*itemsToMakeLostCount) and
// accumulates *pSumFreeSize / *pSumItemSize, used later for cost comparison.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting suballocation may itself be free, or a used allocation
        // that can be sacrificed (made lost) if it is old enough.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: suballocItem must be a free range big enough by itself.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7492 
    7493 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7494 {
    7495  VMA_ASSERT(item != m_Suballocations.end());
    7496  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7497 
    7498  VmaSuballocationList::iterator nextItem = item;
    7499  ++nextItem;
    7500  VMA_ASSERT(nextItem != m_Suballocations.end());
    7501  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7502 
    7503  item->size += nextItem->size;
    7504  --m_FreeCount;
    7505  m_Suballocations.erase(nextItem);
    7506 }
    7507 
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Marks the given suballocation as free, merges it with adjacent free
    // neighbors into a single free range, and returns an iterator to the
    // resulting (possibly merged) free suballocation.

    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // nextItem is about to be absorbed and erased by MergeFreeWithNext(),
        // so it must be removed from the size-sorted registry first.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem's size changes when it absorbs this item, so it has to be
        // unregistered at its old size and re-registered at the new one.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7559 
    7560 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7561 {
    7562  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7563  VMA_ASSERT(item->size > 0);
    7564 
    7565  // You may want to enable this validation at the beginning or at the end of
    7566  // this function, depending on what do you want to check.
    7567  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7568 
    7569  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7570  {
    7571  if(m_FreeSuballocationsBySize.empty())
    7572  {
    7573  m_FreeSuballocationsBySize.push_back(item);
    7574  }
    7575  else
    7576  {
    7577  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7578  }
    7579  }
    7580 
    7581  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7582 }
    7583 
    7584 
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    // Removes a free suballocation from m_FreeSuballocationsBySize, if it was
    // registered there (items below the size threshold never are).
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary search finds the first entry with size not less than
        // item->size; several entries may share that size, so scan linearly
        // through the run of equal-sized entries for the exact iterator.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Once the size differs we have left the run of candidates
            // without finding the item - the registry is inconsistent.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7617 
    7619 // class VmaBlockMetadata_Linear
    7620 
// Constructs empty linear metadata: both suballocation vectors are empty,
// vector 0 plays the role of "1st", and the 2nd vector is unused.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7633 
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
    // Nothing to do: the suballocation vectors release their storage in
    // their own destructors.
}
    7637 
// Initializes metadata for a block of the given size. Initially the whole
// block is free, so the free-size counter starts equal to the block size.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7643 
// Checks internal consistency of the linear metadata. Each VMA_VALIDATE
// returns false from this function on failure; reaching the end means all
// invariants hold. Verifies vector-mode invariants, null-item bookkeeping,
// monotonically increasing offsets (walking 2nd then 1st vector in address
// order), per-allocation offset/size agreement, and the free-size total.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is non-empty exactly when its mode says it is in use.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // A ring buffer cannot have items in 2nd while 1st is empty.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // 'offset' tracks the minimum legal offset for the next suballocation
    // while walking in increasing address order.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode the 2nd vector occupies addresses below the
        // 1st vector, so it is walked first.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading entries of the 1st vector up to m_1stNullItemsBeginCount must
    // all be null placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // In double-stack mode the 2nd vector grows downward from the end of
        // the block, so it is walked in reverse to stay in address order.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7770 
    7771 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7772 {
    7773  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7774  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7775 }
    7776 
// Returns the size of the largest contiguous region currently usable for a
// new allocation, which depends on the active second-vector mode.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        whould make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First live entry is at m_1stNullItemsBeginCount; leading nulls
            // before it count as free space.
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // 2nd grows downward from the end of the block; back() is its top.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
    7840 
    7841 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7842 {
    7843  const VkDeviceSize size = GetSize();
    7844  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7845  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7846  const size_t suballoc1stCount = suballocations1st.size();
    7847  const size_t suballoc2ndCount = suballocations2nd.size();
    7848 
    7849  outInfo.blockCount = 1;
    7850  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7851  outInfo.unusedRangeCount = 0;
    7852  outInfo.usedBytes = 0;
    7853  outInfo.allocationSizeMin = UINT64_MAX;
    7854  outInfo.allocationSizeMax = 0;
    7855  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7856  outInfo.unusedRangeSizeMax = 0;
    7857 
    7858  VkDeviceSize lastOffset = 0;
    7859 
    7860  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7861  {
    7862  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7863  size_t nextAlloc2ndIndex = 0;
    7864  while(lastOffset < freeSpace2ndTo1stEnd)
    7865  {
    7866  // Find next non-null allocation or move nextAllocIndex to the end.
    7867  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7868  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7869  {
    7870  ++nextAlloc2ndIndex;
    7871  }
    7872 
    7873  // Found non-null allocation.
    7874  if(nextAlloc2ndIndex < suballoc2ndCount)
    7875  {
    7876  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7877 
    7878  // 1. Process free space before this allocation.
    7879  if(lastOffset < suballoc.offset)
    7880  {
    7881  // There is free space from lastOffset to suballoc.offset.
    7882  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7883  ++outInfo.unusedRangeCount;
    7884  outInfo.unusedBytes += unusedRangeSize;
    7885  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7886  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7887  }
    7888 
    7889  // 2. Process this allocation.
    7890  // There is allocation with suballoc.offset, suballoc.size.
    7891  outInfo.usedBytes += suballoc.size;
    7892  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7893  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7894 
    7895  // 3. Prepare for next iteration.
    7896  lastOffset = suballoc.offset + suballoc.size;
    7897  ++nextAlloc2ndIndex;
    7898  }
    7899  // We are at the end.
    7900  else
    7901  {
    7902  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7903  if(lastOffset < freeSpace2ndTo1stEnd)
    7904  {
    7905  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7906  ++outInfo.unusedRangeCount;
    7907  outInfo.unusedBytes += unusedRangeSize;
    7908  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7909  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7910  }
    7911 
    7912  // End of loop.
    7913  lastOffset = freeSpace2ndTo1stEnd;
    7914  }
    7915  }
    7916  }
    7917 
    7918  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7919  const VkDeviceSize freeSpace1stTo2ndEnd =
    7920  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7921  while(lastOffset < freeSpace1stTo2ndEnd)
    7922  {
    7923  // Find next non-null allocation or move nextAllocIndex to the end.
    7924  while(nextAlloc1stIndex < suballoc1stCount &&
    7925  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7926  {
    7927  ++nextAlloc1stIndex;
    7928  }
    7929 
    7930  // Found non-null allocation.
    7931  if(nextAlloc1stIndex < suballoc1stCount)
    7932  {
    7933  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7934 
    7935  // 1. Process free space before this allocation.
    7936  if(lastOffset < suballoc.offset)
    7937  {
    7938  // There is free space from lastOffset to suballoc.offset.
    7939  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7940  ++outInfo.unusedRangeCount;
    7941  outInfo.unusedBytes += unusedRangeSize;
    7942  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7943  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7944  }
    7945 
    7946  // 2. Process this allocation.
    7947  // There is allocation with suballoc.offset, suballoc.size.
    7948  outInfo.usedBytes += suballoc.size;
    7949  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7950  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7951 
    7952  // 3. Prepare for next iteration.
    7953  lastOffset = suballoc.offset + suballoc.size;
    7954  ++nextAlloc1stIndex;
    7955  }
    7956  // We are at the end.
    7957  else
    7958  {
    7959  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7960  if(lastOffset < freeSpace1stTo2ndEnd)
    7961  {
    7962  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7963  ++outInfo.unusedRangeCount;
    7964  outInfo.unusedBytes += unusedRangeSize;
    7965  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7966  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7967  }
    7968 
    7969  // End of loop.
    7970  lastOffset = freeSpace1stTo2ndEnd;
    7971  }
    7972  }
    7973 
    7974  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7975  {
    7976  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7977  while(lastOffset < size)
    7978  {
    7979  // Find next non-null allocation or move nextAllocIndex to the end.
    7980  while(nextAlloc2ndIndex != SIZE_MAX &&
    7981  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7982  {
    7983  --nextAlloc2ndIndex;
    7984  }
    7985 
    7986  // Found non-null allocation.
    7987  if(nextAlloc2ndIndex != SIZE_MAX)
    7988  {
    7989  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7990 
    7991  // 1. Process free space before this allocation.
    7992  if(lastOffset < suballoc.offset)
    7993  {
    7994  // There is free space from lastOffset to suballoc.offset.
    7995  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7996  ++outInfo.unusedRangeCount;
    7997  outInfo.unusedBytes += unusedRangeSize;
    7998  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7999  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8000  }
    8001 
    8002  // 2. Process this allocation.
    8003  // There is allocation with suballoc.offset, suballoc.size.
    8004  outInfo.usedBytes += suballoc.size;
    8005  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8006  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8007 
    8008  // 3. Prepare for next iteration.
    8009  lastOffset = suballoc.offset + suballoc.size;
    8010  --nextAlloc2ndIndex;
    8011  }
    8012  // We are at the end.
    8013  else
    8014  {
    8015  // There is free space from lastOffset to size.
    8016  if(lastOffset < size)
    8017  {
    8018  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8019  ++outInfo.unusedRangeCount;
    8020  outInfo.unusedBytes += unusedRangeSize;
    8021  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8022  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8023  }
    8024 
    8025  // End of loop.
    8026  lastOffset = size;
    8027  }
    8028  }
    8029  }
    8030 
    8031  outInfo.unusedBytes = size - outInfo.usedBytes;
    8032 }
    8033 
    8034 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8035 {
    8036  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8037  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8038  const VkDeviceSize size = GetSize();
    8039  const size_t suballoc1stCount = suballocations1st.size();
    8040  const size_t suballoc2ndCount = suballocations2nd.size();
    8041 
    8042  inoutStats.size += size;
    8043 
    8044  VkDeviceSize lastOffset = 0;
    8045 
    8046  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8047  {
    8048  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8049  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8050  while(lastOffset < freeSpace2ndTo1stEnd)
    8051  {
    8052  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8053  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8054  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8055  {
    8056  ++nextAlloc2ndIndex;
    8057  }
    8058 
    8059  // Found non-null allocation.
    8060  if(nextAlloc2ndIndex < suballoc2ndCount)
    8061  {
    8062  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8063 
    8064  // 1. Process free space before this allocation.
    8065  if(lastOffset < suballoc.offset)
    8066  {
    8067  // There is free space from lastOffset to suballoc.offset.
    8068  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8069  inoutStats.unusedSize += unusedRangeSize;
    8070  ++inoutStats.unusedRangeCount;
    8071  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8072  }
    8073 
    8074  // 2. Process this allocation.
    8075  // There is allocation with suballoc.offset, suballoc.size.
    8076  ++inoutStats.allocationCount;
    8077 
    8078  // 3. Prepare for next iteration.
    8079  lastOffset = suballoc.offset + suballoc.size;
    8080  ++nextAlloc2ndIndex;
    8081  }
    8082  // We are at the end.
    8083  else
    8084  {
    8085  if(lastOffset < freeSpace2ndTo1stEnd)
    8086  {
    8087  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8088  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8089  inoutStats.unusedSize += unusedRangeSize;
    8090  ++inoutStats.unusedRangeCount;
    8091  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8092  }
    8093 
    8094  // End of loop.
    8095  lastOffset = freeSpace2ndTo1stEnd;
    8096  }
    8097  }
    8098  }
    8099 
    8100  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8101  const VkDeviceSize freeSpace1stTo2ndEnd =
    8102  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8103  while(lastOffset < freeSpace1stTo2ndEnd)
    8104  {
    8105  // Find next non-null allocation or move nextAllocIndex to the end.
    8106  while(nextAlloc1stIndex < suballoc1stCount &&
    8107  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8108  {
    8109  ++nextAlloc1stIndex;
    8110  }
    8111 
    8112  // Found non-null allocation.
    8113  if(nextAlloc1stIndex < suballoc1stCount)
    8114  {
    8115  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8116 
    8117  // 1. Process free space before this allocation.
    8118  if(lastOffset < suballoc.offset)
    8119  {
    8120  // There is free space from lastOffset to suballoc.offset.
    8121  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8122  inoutStats.unusedSize += unusedRangeSize;
    8123  ++inoutStats.unusedRangeCount;
    8124  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8125  }
    8126 
    8127  // 2. Process this allocation.
    8128  // There is allocation with suballoc.offset, suballoc.size.
    8129  ++inoutStats.allocationCount;
    8130 
    8131  // 3. Prepare for next iteration.
    8132  lastOffset = suballoc.offset + suballoc.size;
    8133  ++nextAlloc1stIndex;
    8134  }
    8135  // We are at the end.
    8136  else
    8137  {
    8138  if(lastOffset < freeSpace1stTo2ndEnd)
    8139  {
    8140  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8141  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8142  inoutStats.unusedSize += unusedRangeSize;
    8143  ++inoutStats.unusedRangeCount;
    8144  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8145  }
    8146 
    8147  // End of loop.
    8148  lastOffset = freeSpace1stTo2ndEnd;
    8149  }
    8150  }
    8151 
    8152  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8153  {
    8154  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8155  while(lastOffset < size)
    8156  {
    8157  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8158  while(nextAlloc2ndIndex != SIZE_MAX &&
    8159  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8160  {
    8161  --nextAlloc2ndIndex;
    8162  }
    8163 
    8164  // Found non-null allocation.
    8165  if(nextAlloc2ndIndex != SIZE_MAX)
    8166  {
    8167  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8168 
    8169  // 1. Process free space before this allocation.
    8170  if(lastOffset < suballoc.offset)
    8171  {
    8172  // There is free space from lastOffset to suballoc.offset.
    8173  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8174  inoutStats.unusedSize += unusedRangeSize;
    8175  ++inoutStats.unusedRangeCount;
    8176  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8177  }
    8178 
    8179  // 2. Process this allocation.
    8180  // There is allocation with suballoc.offset, suballoc.size.
    8181  ++inoutStats.allocationCount;
    8182 
    8183  // 3. Prepare for next iteration.
    8184  lastOffset = suballoc.offset + suballoc.size;
    8185  --nextAlloc2ndIndex;
    8186  }
    8187  // We are at the end.
    8188  else
    8189  {
    8190  if(lastOffset < size)
    8191  {
    8192  // There is free space from lastOffset to size.
    8193  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8194  inoutStats.unusedSize += unusedRangeSize;
    8195  ++inoutStats.unusedRangeCount;
    8196  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8197  }
    8198 
    8199  // End of loop.
    8200  lastOffset = size;
    8201  }
    8202  }
    8203  }
    8204 }
    8205 
    8206 #if VMA_STATS_STRING_ENABLED
    8207 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8208 {
    8209  const VkDeviceSize size = GetSize();
    8210  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8211  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8212  const size_t suballoc1stCount = suballocations1st.size();
    8213  const size_t suballoc2ndCount = suballocations2nd.size();
    8214 
    8215  // FIRST PASS
    8216 
    8217  size_t unusedRangeCount = 0;
    8218  VkDeviceSize usedBytes = 0;
    8219 
    8220  VkDeviceSize lastOffset = 0;
    8221 
    8222  size_t alloc2ndCount = 0;
    8223  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8224  {
    8225  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8226  size_t nextAlloc2ndIndex = 0;
    8227  while(lastOffset < freeSpace2ndTo1stEnd)
    8228  {
    8229  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8230  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8231  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8232  {
    8233  ++nextAlloc2ndIndex;
    8234  }
    8235 
    8236  // Found non-null allocation.
    8237  if(nextAlloc2ndIndex < suballoc2ndCount)
    8238  {
    8239  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8240 
    8241  // 1. Process free space before this allocation.
    8242  if(lastOffset < suballoc.offset)
    8243  {
    8244  // There is free space from lastOffset to suballoc.offset.
    8245  ++unusedRangeCount;
    8246  }
    8247 
    8248  // 2. Process this allocation.
    8249  // There is allocation with suballoc.offset, suballoc.size.
    8250  ++alloc2ndCount;
    8251  usedBytes += suballoc.size;
    8252 
    8253  // 3. Prepare for next iteration.
    8254  lastOffset = suballoc.offset + suballoc.size;
    8255  ++nextAlloc2ndIndex;
    8256  }
    8257  // We are at the end.
    8258  else
    8259  {
    8260  if(lastOffset < freeSpace2ndTo1stEnd)
    8261  {
    8262  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8263  ++unusedRangeCount;
    8264  }
    8265 
    8266  // End of loop.
    8267  lastOffset = freeSpace2ndTo1stEnd;
    8268  }
    8269  }
    8270  }
    8271 
    8272  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8273  size_t alloc1stCount = 0;
    8274  const VkDeviceSize freeSpace1stTo2ndEnd =
    8275  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8276  while(lastOffset < freeSpace1stTo2ndEnd)
    8277  {
    8278  // Find next non-null allocation or move nextAllocIndex to the end.
    8279  while(nextAlloc1stIndex < suballoc1stCount &&
    8280  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8281  {
    8282  ++nextAlloc1stIndex;
    8283  }
    8284 
    8285  // Found non-null allocation.
    8286  if(nextAlloc1stIndex < suballoc1stCount)
    8287  {
    8288  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8289 
    8290  // 1. Process free space before this allocation.
    8291  if(lastOffset < suballoc.offset)
    8292  {
    8293  // There is free space from lastOffset to suballoc.offset.
    8294  ++unusedRangeCount;
    8295  }
    8296 
    8297  // 2. Process this allocation.
    8298  // There is allocation with suballoc.offset, suballoc.size.
    8299  ++alloc1stCount;
    8300  usedBytes += suballoc.size;
    8301 
    8302  // 3. Prepare for next iteration.
    8303  lastOffset = suballoc.offset + suballoc.size;
    8304  ++nextAlloc1stIndex;
    8305  }
    8306  // We are at the end.
    8307  else
    8308  {
    8309  if(lastOffset < size)
    8310  {
    8311  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8312  ++unusedRangeCount;
    8313  }
    8314 
    8315  // End of loop.
    8316  lastOffset = freeSpace1stTo2ndEnd;
    8317  }
    8318  }
    8319 
    8320  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8321  {
    8322  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8323  while(lastOffset < size)
    8324  {
    8325  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8326  while(nextAlloc2ndIndex != SIZE_MAX &&
    8327  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8328  {
    8329  --nextAlloc2ndIndex;
    8330  }
    8331 
    8332  // Found non-null allocation.
    8333  if(nextAlloc2ndIndex != SIZE_MAX)
    8334  {
    8335  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8336 
    8337  // 1. Process free space before this allocation.
    8338  if(lastOffset < suballoc.offset)
    8339  {
    8340  // There is free space from lastOffset to suballoc.offset.
    8341  ++unusedRangeCount;
    8342  }
    8343 
    8344  // 2. Process this allocation.
    8345  // There is allocation with suballoc.offset, suballoc.size.
    8346  ++alloc2ndCount;
    8347  usedBytes += suballoc.size;
    8348 
    8349  // 3. Prepare for next iteration.
    8350  lastOffset = suballoc.offset + suballoc.size;
    8351  --nextAlloc2ndIndex;
    8352  }
    8353  // We are at the end.
    8354  else
    8355  {
    8356  if(lastOffset < size)
    8357  {
    8358  // There is free space from lastOffset to size.
    8359  ++unusedRangeCount;
    8360  }
    8361 
    8362  // End of loop.
    8363  lastOffset = size;
    8364  }
    8365  }
    8366  }
    8367 
    8368  const VkDeviceSize unusedBytes = size - usedBytes;
    8369  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8370 
    8371  // SECOND PASS
    8372  lastOffset = 0;
    8373 
    8374  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8375  {
    8376  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8377  size_t nextAlloc2ndIndex = 0;
    8378  while(lastOffset < freeSpace2ndTo1stEnd)
    8379  {
    8380  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8381  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8382  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8383  {
    8384  ++nextAlloc2ndIndex;
    8385  }
    8386 
    8387  // Found non-null allocation.
    8388  if(nextAlloc2ndIndex < suballoc2ndCount)
    8389  {
    8390  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8391 
    8392  // 1. Process free space before this allocation.
    8393  if(lastOffset < suballoc.offset)
    8394  {
    8395  // There is free space from lastOffset to suballoc.offset.
    8396  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8397  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8398  }
    8399 
    8400  // 2. Process this allocation.
    8401  // There is allocation with suballoc.offset, suballoc.size.
    8402  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8403 
    8404  // 3. Prepare for next iteration.
    8405  lastOffset = suballoc.offset + suballoc.size;
    8406  ++nextAlloc2ndIndex;
    8407  }
    8408  // We are at the end.
    8409  else
    8410  {
    8411  if(lastOffset < freeSpace2ndTo1stEnd)
    8412  {
    8413  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8414  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8415  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8416  }
    8417 
    8418  // End of loop.
    8419  lastOffset = freeSpace2ndTo1stEnd;
    8420  }
    8421  }
    8422  }
    8423 
    8424  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8425  while(lastOffset < freeSpace1stTo2ndEnd)
    8426  {
    8427  // Find next non-null allocation or move nextAllocIndex to the end.
    8428  while(nextAlloc1stIndex < suballoc1stCount &&
    8429  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8430  {
    8431  ++nextAlloc1stIndex;
    8432  }
    8433 
    8434  // Found non-null allocation.
    8435  if(nextAlloc1stIndex < suballoc1stCount)
    8436  {
    8437  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8438 
    8439  // 1. Process free space before this allocation.
    8440  if(lastOffset < suballoc.offset)
    8441  {
    8442  // There is free space from lastOffset to suballoc.offset.
    8443  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8444  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8445  }
    8446 
    8447  // 2. Process this allocation.
    8448  // There is allocation with suballoc.offset, suballoc.size.
    8449  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8450 
    8451  // 3. Prepare for next iteration.
    8452  lastOffset = suballoc.offset + suballoc.size;
    8453  ++nextAlloc1stIndex;
    8454  }
    8455  // We are at the end.
    8456  else
    8457  {
    8458  if(lastOffset < freeSpace1stTo2ndEnd)
    8459  {
    8460  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8461  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8462  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8463  }
    8464 
    8465  // End of loop.
    8466  lastOffset = freeSpace1stTo2ndEnd;
    8467  }
    8468  }
    8469 
    8470  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8471  {
    8472  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8473  while(lastOffset < size)
    8474  {
    8475  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8476  while(nextAlloc2ndIndex != SIZE_MAX &&
    8477  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8478  {
    8479  --nextAlloc2ndIndex;
    8480  }
    8481 
    8482  // Found non-null allocation.
    8483  if(nextAlloc2ndIndex != SIZE_MAX)
    8484  {
    8485  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8486 
    8487  // 1. Process free space before this allocation.
    8488  if(lastOffset < suballoc.offset)
    8489  {
    8490  // There is free space from lastOffset to suballoc.offset.
    8491  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8492  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8493  }
    8494 
    8495  // 2. Process this allocation.
    8496  // There is allocation with suballoc.offset, suballoc.size.
    8497  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8498 
    8499  // 3. Prepare for next iteration.
    8500  lastOffset = suballoc.offset + suballoc.size;
    8501  --nextAlloc2ndIndex;
    8502  }
    8503  // We are at the end.
    8504  else
    8505  {
    8506  if(lastOffset < size)
    8507  {
    8508  // There is free space from lastOffset to size.
    8509  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8510  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8511  }
    8512 
    8513  // End of loop.
    8514  lastOffset = size;
    8515  }
    8516  }
    8517  }
    8518 
    8519  PrintDetailedMap_End(json);
    8520 }
    8521 #endif // #if VMA_STATS_STRING_ENABLED
    8522 
/*
Searches for a place to put a new allocation of given size and alignment inside
this linear block, WITHOUT modifying any metadata state. On success, fills
*pAllocationRequest (offset, sumFreeSize, sumItemSize, itemsToMakeLostCount)
and returns true; on failure returns false.

Two top-level modes:
- upperAddress == true: allocate from the top of the block downward (2nd vector
  used as the upper stack of a double stack). Incompatible with ring-buffer mode.
- upperAddress == false: first try to append after the end of the 1st vector;
  if that fails, wrap around to the beginning of the block (2nd vector as ring
  buffer), optionally making existing 1st-vector allocations lost when
  canMakeOtherLost is true.

NOTE(review): the `strategy` parameter is accepted but never read in this
implementation — linear placement admits only one candidate position.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Upper stack cannot coexist with ring-buffer usage of the 2nd vector.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            // Would underflow below offset 0 — no room under the current top item.
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        // Growing downward, so the offset must be aligned DOWN.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Iterate from the top of the upper stack downward (back() is the
            // lowest-offset, i.e. most recently pushed, upper-stack item).
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // In double-stack mode the usable space ends where the upper stack begins.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            // Wrap-around only makes sense when something occupies the end of the block.
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Walk 1st-vector items that collide with [resultOffset, resultOffset+allocSize+margin)
                // and count those that can be made lost; fail if any cannot.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            // NOTE(review): first condition uses strict '<' against `size` while the
            // second uses '<=' against the next item's offset — confirm this asymmetry
            // is intentional.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    8895 
    8896 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    8897  uint32_t currentFrameIndex,
    8898  uint32_t frameInUseCount,
    8899  VmaAllocationRequest* pAllocationRequest)
    8900 {
    8901  if(pAllocationRequest->itemsToMakeLostCount == 0)
    8902  {
    8903  return true;
    8904  }
    8905 
    8906  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    8907 
    8908  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8909  size_t index1st = m_1stNullItemsBeginCount;
    8910  size_t madeLostCount = 0;
    8911  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    8912  {
    8913  VMA_ASSERT(index1st < suballocations1st.size());
    8914  VmaSuballocation& suballoc = suballocations1st[index1st];
    8915  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8916  {
    8917  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8918  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    8919  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8920  {
    8921  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8922  suballoc.hAllocation = VK_NULL_HANDLE;
    8923  m_SumFreeSize += suballoc.size;
    8924  ++m_1stNullItemsMiddleCount;
    8925  ++madeLostCount;
    8926  }
    8927  else
    8928  {
    8929  return false;
    8930  }
    8931  }
    8932  ++index1st;
    8933  }
    8934 
    8935  CleanupAfterFree();
    8936  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    8937 
    8938  return true;
    8939 }
    8940 
    8941 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8942 {
    8943  uint32_t lostAllocationCount = 0;
    8944 
    8945  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8946  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8947  {
    8948  VmaSuballocation& suballoc = suballocations1st[i];
    8949  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8950  suballoc.hAllocation->CanBecomeLost() &&
    8951  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8952  {
    8953  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8954  suballoc.hAllocation = VK_NULL_HANDLE;
    8955  ++m_1stNullItemsMiddleCount;
    8956  m_SumFreeSize += suballoc.size;
    8957  ++lostAllocationCount;
    8958  }
    8959  }
    8960 
    8961  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8962  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8963  {
    8964  VmaSuballocation& suballoc = suballocations2nd[i];
    8965  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8966  suballoc.hAllocation->CanBecomeLost() &&
    8967  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8968  {
    8969  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8970  suballoc.hAllocation = VK_NULL_HANDLE;
    8971  ++m_2ndNullItemsCount;
    8972  ++lostAllocationCount;
    8973  }
    8974  }
    8975 
    8976  if(lostAllocationCount)
    8977  {
    8978  CleanupAfterFree();
    8979  }
    8980 
    8981  return lostAllocationCount;
    8982 }
    8983 
    8984 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8985 {
    8986  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8987  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8988  {
    8989  const VmaSuballocation& suballoc = suballocations1st[i];
    8990  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8991  {
    8992  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    8993  {
    8994  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8995  return VK_ERROR_VALIDATION_FAILED_EXT;
    8996  }
    8997  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    8998  {
    8999  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9000  return VK_ERROR_VALIDATION_FAILED_EXT;
    9001  }
    9002  }
    9003  }
    9004 
    9005  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9006  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9007  {
    9008  const VmaSuballocation& suballoc = suballocations2nd[i];
    9009  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9010  {
    9011  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9012  {
    9013  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9014  return VK_ERROR_VALIDATION_FAILED_EXT;
    9015  }
    9016  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9017  {
    9018  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9019  return VK_ERROR_VALIDATION_FAILED_EXT;
    9020  }
    9021  }
    9022  }
    9023 
    9024  return VK_SUCCESS;
    9025 }
    9026 
    9027 void VmaBlockMetadata_Linear::Alloc(
    9028  const VmaAllocationRequest& request,
    9029  VmaSuballocationType type,
    9030  VkDeviceSize allocSize,
    9031  bool upperAddress,
    9032  VmaAllocation hAllocation)
    9033 {
    9034  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    9035 
    9036  if(upperAddress)
    9037  {
    9038  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    9039  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    9040  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9041  suballocations2nd.push_back(newSuballoc);
    9042  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    9043  }
    9044  else
    9045  {
    9046  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9047 
    9048  // First allocation.
    9049  if(suballocations1st.empty())
    9050  {
    9051  suballocations1st.push_back(newSuballoc);
    9052  }
    9053  else
    9054  {
    9055  // New allocation at the end of 1st vector.
    9056  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
    9057  {
    9058  // Check if it fits before the end of the block.
    9059  VMA_ASSERT(request.offset + allocSize <= GetSize());
    9060  suballocations1st.push_back(newSuballoc);
    9061  }
    9062  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    9063  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
    9064  {
    9065  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9066 
    9067  switch(m_2ndVectorMode)
    9068  {
    9069  case SECOND_VECTOR_EMPTY:
    9070  // First allocation from second part ring buffer.
    9071  VMA_ASSERT(suballocations2nd.empty());
    9072  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    9073  break;
    9074  case SECOND_VECTOR_RING_BUFFER:
    9075  // 2-part ring buffer is already started.
    9076  VMA_ASSERT(!suballocations2nd.empty());
    9077  break;
    9078  case SECOND_VECTOR_DOUBLE_STACK:
    9079  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    9080  break;
    9081  default:
    9082  VMA_ASSERT(0);
    9083  }
    9084 
    9085  suballocations2nd.push_back(newSuballoc);
    9086  }
    9087  else
    9088  {
    9089  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    9090  }
    9091  }
    9092  }
    9093 
    9094  m_SumFreeSize -= newSuballoc.size;
    9095 }
    9096 
    9097 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9098 {
    9099  FreeAtOffset(allocation->GetOffset());
    9100 }
    9101 
// Frees the suballocation that starts at the given offset.
// Tries the O(1) cases first (first live item of the 1st vector, last item of
// the 2nd or 1st vector), then falls back to binary search in the middle of
// either vector, marking the item as a null (lazily deleted) entry.
// Asserts if the offset matches no live allocation.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            // Item stays in the vector; only the leading-null counter advances.
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            // Truly removable: it is at the vector's end, so pop it.
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The 1st vector is sorted by offset, so binary search applies; the
        // leading null items are skipped via m_1stNullItemsBeginCount.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Lazy deletion: mark as free, compaction happens in CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Sort order of the 2nd vector depends on its mode: ascending offsets
        // in ring-buffer mode, descending in double-stack (upper stack) mode.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9190 
    9191 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9192 {
    9193  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9194  const size_t suballocCount = AccessSuballocations1st().size();
    9195  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9196 }
    9197 
// Housekeeping run after every free: trims null items from the edges of both
// vectors, optionally compacts the 1st vector, and handles the transitions
// "everything empty" and "1st vector drained -> promote 2nd vector to 1st"
// (ring-buffer mode only).
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations at all: reset to pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // (Middle nulls adjacent to the leading run are reclassified as leading.)
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all non-null items to the front, preserving order,
            // then drop the tail. O(n) single pass.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // The former 2nd's nulls become the new 1st's nulls; leading
                // nulls are reclassified the same way as above.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which internal vector plays the "1st" role.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9294 
    9295 
    9297 // class VmaBlockMetadata_Buddy
    9298 
    9299 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    9300  VmaBlockMetadata(hAllocator),
    9301  m_Root(VMA_NULL),
    9302  m_AllocationCount(0),
    9303  m_FreeCount(1),
    9304  m_SumFreeSize(0)
    9305 {
    9306  memset(m_FreeList, 0, sizeof(m_FreeList));
    9307 }
    9308 
    9309 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    9310 {
    9311  DeleteNode(m_Root);
    9312 }
    9313 
// Initializes the buddy tree for a block of the given size.
// Only the largest power-of-2 prefix of the block is usable by this
// algorithm; the remainder is reported as "unusable" by the stat functions.
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    // Round down to a power of 2 - buddy nodes must halve evenly.
    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount: add levels while the next level's node size
    // would still be at least MIN_NODE_SIZE, capped at MAX_LEVELS.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // Single free root node covering the whole usable range.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    9338 
// Validates internal consistency of the buddy metadata: the node tree, the
// aggregate counters, and the doubly-linked per-level free lists.
// Returns false (via VMA_VALIDATE) on the first inconsistency found.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Counters maintained incrementally must match a full recount.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            // Forward/backward links must agree; the last node must be `back`.
            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9381 
    9382 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9383 {
    9384  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9385  {
    9386  if(m_FreeList[level].front != VMA_NULL)
    9387  {
    9388  return LevelToNodeSize(level);
    9389  }
    9390  }
    9391  return 0;
    9392 }
    9393 
// Fills outInfo with statistics for this block by walking the whole tree.
// The tail beyond the usable power-of-2 prefix is reported as one extra
// unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Seed min with the maximum value so the first fold with VMA_MIN sticks.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    // Recursive accumulation over the node tree.
    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9417 
    9418 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9419 {
    9420  const VkDeviceSize unusableSize = GetUnusableSize();
    9421 
    9422  inoutStats.size += GetSize();
    9423  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9424  inoutStats.allocationCount += m_AllocationCount;
    9425  inoutStats.unusedRangeCount += m_FreeCount;
    9426  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9427 
    9428  if(unusableSize > 0)
    9429  {
    9430  ++inoutStats.unusedRangeCount;
    9431  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9432  }
    9433 }
    9434 
    9435 #if VMA_STATS_STRING_ENABLED
    9436 
// Emits a detailed JSON map of this block: summary statistics first, then
// every allocation and unused range via a tree walk, then the unusable tail.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // Statistics are recomputed from scratch just for the header.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    // The tail beyond the usable power-of-2 prefix is reported as unused.
    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9461 
    9462 #endif // #if VMA_STATS_STRING_ENABLED
    9463 
// Tries to find a place for an allocation of the given size/alignment.
// On success fills *pAllocationRequest (the chosen level is smuggled through
// customData for Alloc() to use) and returns true; returns false when no
// suitably aligned free node exists. Lost allocations are not supported, so
// canMakeOtherLost and strategy are effectively ignored.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    // Inflating both alignment and size to the granularity guarantees no
    // linear/optimal resource pair can share a granularity page.
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Search from the tightest-fitting level up toward the root (larger
    // nodes), taking the first free node that satisfies the alignment.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Pass the found level to Alloc() through the opaque field.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9514 
    9515 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9516  uint32_t currentFrameIndex,
    9517  uint32_t frameInUseCount,
    9518  VmaAllocationRequest* pAllocationRequest)
    9519 {
    9520  /*
    9521  Lost allocations are not supported in buddy allocator at the moment.
    9522  Support might be added in the future.
    9523  */
    9524  return pAllocationRequest->itemsToMakeLostCount == 0;
    9525 }
    9526 
    9527 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9528 {
    9529  /*
    9530  Lost allocations are not supported in buddy allocator at the moment.
    9531  Support might be added in the future.
    9532  */
    9533  return 0;
    9534 }
    9535 
// Commits an allocation previously prepared by CreateAllocationRequest():
// locates the chosen free node, splits it repeatedly down to the target
// level, and converts the final node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // customData carries the level chosen by CreateAllocationRequest().
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node at currLevel whose offset matches the request.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Pushing right then left keeps the left child at the list front,
        // which the loop below relies on.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // One node became two: net +1 free node.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    // Note: the full node size is reserved even if allocSize is smaller;
    // only allocSize is subtracted, the rest is internal fragmentation
    // accounted for in the stats functions.
    m_SumFreeSize -= allocSize;
}
    9610 
    9611 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9612 {
    9613  if(node->type == Node::TYPE_SPLIT)
    9614  {
    9615  DeleteNode(node->split.leftChild->buddy);
    9616  DeleteNode(node->split.leftChild);
    9617  }
    9618 
    9619  vma_delete(GetAllocationCallbacks(), node);
    9620 }
    9621 
// Recursively validates the subtree rooted at curr and accumulates counts
// into ctx. parent/buddy links, node type invariants, and child offsets are
// all checked; returns false (via VMA_VALIDATE) on the first violation.
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Only the root has no buddy.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // Internal fragmentation (node size minus allocation size) still
        // counts toward the free-size sum.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}
    9665 
    9666 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9667 {
    9668  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9669  uint32_t level = 0;
    9670  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9671  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9672  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9673  {
    9674  ++level;
    9675  currLevelNodeSize = nextLevelNodeSize;
    9676  nextLevelNodeSize = currLevelNodeSize >> 1;
    9677  }
    9678  return level;
    9679 }
    9680 
// Frees the allocation at the given offset: walks the tree from the root to
// the owning allocation node, marks it free, and merges it with its buddy
// repeatedly while both halves of a split are free.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        // Descend left or right depending on which half contains the offset.
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above permits alloc == VK_NULL_HANDLE, yet
    // alloc->GetSize() is called unconditionally here - confirm callers
    // never actually pass a null handle.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    // The root (level 0) has no buddy, hence the level > 0 guard.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        // Two free nodes merged into one: net -1 free node.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9731 
    9732 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9733 {
    9734  switch(node->type)
    9735  {
    9736  case Node::TYPE_FREE:
    9737  ++outInfo.unusedRangeCount;
    9738  outInfo.unusedBytes += levelNodeSize;
    9739  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9740  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9741  break;
    9742  case Node::TYPE_ALLOCATION:
    9743  {
    9744  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9745  ++outInfo.allocationCount;
    9746  outInfo.usedBytes += allocSize;
    9747  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9748  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9749 
    9750  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9751  if(unusedRangeSize > 0)
    9752  {
    9753  ++outInfo.unusedRangeCount;
    9754  outInfo.unusedBytes += unusedRangeSize;
    9755  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9756  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9757  }
    9758  }
    9759  break;
    9760  case Node::TYPE_SPLIT:
    9761  {
    9762  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9763  const Node* const leftChild = node->split.leftChild;
    9764  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9765  const Node* const rightChild = leftChild->buddy;
    9766  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9767  }
    9768  break;
    9769  default:
    9770  VMA_ASSERT(0);
    9771  }
    9772 }
    9773 
    9774 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9775 {
    9776  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9777 
    9778  // List is empty.
    9779  Node* const frontNode = m_FreeList[level].front;
    9780  if(frontNode == VMA_NULL)
    9781  {
    9782  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9783  node->free.prev = node->free.next = VMA_NULL;
    9784  m_FreeList[level].front = m_FreeList[level].back = node;
    9785  }
    9786  else
    9787  {
    9788  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9789  node->free.prev = VMA_NULL;
    9790  node->free.next = frontNode;
    9791  frontNode->free.prev = node;
    9792  m_FreeList[level].front = node;
    9793  }
    9794 }
    9795 
// Unlinks a node from the given level's doubly-linked free list, fixing up
// the list's front/back pointers when the node is at either end.
void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
{
    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);

    // It is at the front.
    if(node->free.prev == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].front == node);
        m_FreeList[level].front = node->free.next;
    }
    else
    {
        Node* const prevFreeNode = node->free.prev;
        VMA_ASSERT(prevFreeNode->free.next == node);
        prevFreeNode->free.next = node->free.next;
    }

    // It is at the back.
    if(node->free.next == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == node);
        m_FreeList[level].back = node->free.prev;
    }
    else
    {
        Node* const nextFreeNode = node->free.next;
        VMA_ASSERT(nextFreeNode->free.prev == node);
        nextFreeNode->free.prev = node->free.prev;
    }
}
    9826 
    9827 #if VMA_STATS_STRING_ENABLED
// Recursively emits JSON entries for the subtree rooted at node: free nodes
// as unused ranges, allocation nodes as allocations (plus their internal
// fragmentation tail), and split nodes by recursing into both children.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            // Report the unused tail of the node (internal fragmentation).
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    9858 #endif // #if VMA_STATS_STRING_ENABLED
    9859 
    9860 
    9862 // class VmaDeviceMemoryBlock
    9863 
// Constructs an uninitialized block; all real setup happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),       // Reference count for persistent mapping.
    m_pMappedData(VMA_NULL)
{
}
    9873 
    9874 void VmaDeviceMemoryBlock::Init(
    9875  VmaAllocator hAllocator,
    9876  uint32_t newMemoryTypeIndex,
    9877  VkDeviceMemory newMemory,
    9878  VkDeviceSize newSize,
    9879  uint32_t id,
    9880  uint32_t algorithm)
    9881 {
    9882  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9883 
    9884  m_MemoryTypeIndex = newMemoryTypeIndex;
    9885  m_Id = id;
    9886  m_hMemory = newMemory;
    9887 
    9888  switch(algorithm)
    9889  {
    9891  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9892  break;
    9894  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9895  break;
    9896  default:
    9897  VMA_ASSERT(0);
    9898  // Fall-through.
    9899  case 0:
    9900  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9901  }
    9902  m_pMetadata->Init(newSize);
    9903 }
    9904 
// Releases the block's VkDeviceMemory and metadata. The block must contain
// no live allocations when this is called.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    9918 
    9919 bool VmaDeviceMemoryBlock::Validate() const
    9920 {
    9921  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    9922  (m_pMetadata->GetSize() != 0));
    9923 
    9924  return m_pMetadata->Validate();
    9925 }
    9926 
    9927 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9928 {
    9929  void* pData = nullptr;
    9930  VkResult res = Map(hAllocator, 1, &pData);
    9931  if(res != VK_SUCCESS)
    9932  {
    9933  return res;
    9934  }
    9935 
    9936  res = m_pMetadata->CheckCorruption(pData);
    9937 
    9938  Unmap(hAllocator, 1);
    9939 
    9940  return res;
    9941 }
    9942 
// Maps the whole block memory, incrementing the map reference count by
// `count`, and returns the mapped pointer in *ppData (if not null).
// vkMapMemory is called only on the 0 -> count transition; subsequent calls
// reuse m_pMappedData. Thread-safe via m_Mutex (when mutexes are enabled).
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: just bump the reference count.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the entire VkDeviceMemory range.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
    9981 
// Decrements the map reference count by `count`; vkUnmapMemory is called
// only when the count reaches zero. Asserts on unbalanced unmapping.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            // Clear the cached pointer before unmapping.
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
    10004 
// Writes the corruption-detection magic value into the debug margins
// immediately before and after an allocation. Requires VMA_DEBUG_MARGIN and
// VMA_DEBUG_DETECT_CORRUPTION to be enabled. Maps/unmaps the block around
// the writes (refcounted, so an existing mapping is reused).
VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Margin before the allocation and margin after it.
    VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    VmaWriteMagicValue(pData, allocOffset + allocSize);

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    10024 
// Verifies the magic values around a freed allocation are intact; an
// overwritten margin indicates an out-of-bounds write by the application and
// triggers an assert. Maps/unmaps the block around the checks.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    10050 
    10051 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10052  const VmaAllocator hAllocator,
    10053  const VmaAllocation hAllocation,
    10054  VkBuffer hBuffer)
    10055 {
    10056  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10057  hAllocation->GetBlock() == this);
    10058  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10059  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10060  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10061  hAllocator->m_hDevice,
    10062  hBuffer,
    10063  m_hMemory,
    10064  hAllocation->GetOffset());
    10065 }
    10066 
    10067 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10068  const VmaAllocator hAllocator,
    10069  const VmaAllocation hAllocation,
    10070  VkImage hImage)
    10071 {
    10072  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10073  hAllocation->GetBlock() == this);
    10074  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10075  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10076  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10077  hAllocator->m_hDevice,
    10078  hImage,
    10079  m_hMemory,
    10080  hAllocation->GetOffset());
    10081 }
    10082 
    10083 static void InitStatInfo(VmaStatInfo& outInfo)
    10084 {
    10085  memset(&outInfo, 0, sizeof(outInfo));
    10086  outInfo.allocationSizeMin = UINT64_MAX;
    10087  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10088 }
    10089 
    10090 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10091 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10092 {
    10093  inoutInfo.blockCount += srcInfo.blockCount;
    10094  inoutInfo.allocationCount += srcInfo.allocationCount;
    10095  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10096  inoutInfo.usedBytes += srcInfo.usedBytes;
    10097  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10098  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10099  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10100  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10101  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10102 }
    10103 
    10104 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10105 {
    10106  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10107  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10108  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10109  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10110 }
    10111 
// Constructs a custom pool. A pool owns exactly one VmaBlockVector that holds
// its VkDeviceMemory blocks. blockSize == 0 in createInfo means the caller
// left block sizing to the allocator (preferredBlockSize is used and block
// size is not "explicit").
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10130 
// Trivial destructor: the owned m_BlockVector destroys its memory blocks in
// its own destructor.
VmaPool_T::~VmaPool_T()
{
}
    10134 
    10135 #if VMA_STATS_STRING_ENABLED
    10136 
    10137 #endif // #if VMA_STATS_STRING_ENABLED
    10138 
// Constructs a vector of VkDeviceMemory blocks for a single Vulkan memory type.
// Used both as the allocator's default per-memory-type vector and as the
// backing store of a custom pool (isCustomPool == true). No blocks are
// created here; see CreateMinBlocks()/CreateBlock().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    // Block pointers are stored in a vector that uses the user-provided
    // allocation callbacks.
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10166 
    10167 VmaBlockVector::~VmaBlockVector()
    10168 {
    10169  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10170 
    10171  for(size_t i = m_Blocks.size(); i--; )
    10172  {
    10173  m_Blocks[i]->Destroy(m_hAllocator);
    10174  vma_delete(m_hAllocator, m_Blocks[i]);
    10175  }
    10176 }
    10177 
    10178 VkResult VmaBlockVector::CreateMinBlocks()
    10179 {
    10180  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10181  {
    10182  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10183  if(res != VK_SUCCESS)
    10184  {
    10185  return res;
    10186  }
    10187  }
    10188  return VK_SUCCESS;
    10189 }
    10190 
    10191 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10192 {
    10193  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10194 
    10195  const size_t blockCount = m_Blocks.size();
    10196 
    10197  pStats->size = 0;
    10198  pStats->unusedSize = 0;
    10199  pStats->allocationCount = 0;
    10200  pStats->unusedRangeCount = 0;
    10201  pStats->unusedRangeSizeMax = 0;
    10202  pStats->blockCount = blockCount;
    10203 
    10204  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10205  {
    10206  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10207  VMA_ASSERT(pBlock);
    10208  VMA_HEAVY_ASSERT(pBlock->Validate());
    10209  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10210  }
    10211 }
    10212 
    10213 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10214 {
    10215  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10216  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10217  (VMA_DEBUG_MARGIN > 0) &&
    10218  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10219 }
    10220 
// Maximum number of retries in VmaBlockVector::Allocate when allocating with
// canMakeOtherLost: other threads may touch allocations between tries, making
// it impossible to make them lost at the same time as we try to allocate.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10222 
    10223 VkResult VmaBlockVector::Allocate(
    10224  VmaPool hCurrentPool,
    10225  uint32_t currentFrameIndex,
    10226  VkDeviceSize size,
    10227  VkDeviceSize alignment,
    10228  const VmaAllocationCreateInfo& createInfo,
    10229  VmaSuballocationType suballocType,
    10230  VmaAllocation* pAllocation)
    10231 {
    10232  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10233  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10234  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10235  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10236  const bool canCreateNewBlock =
    10237  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10238  (m_Blocks.size() < m_MaxBlockCount);
    10239  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10240 
    10241  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10242  // Which in turn is available only when maxBlockCount = 1.
    10243  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10244  {
    10245  canMakeOtherLost = false;
    10246  }
    10247 
    10248  // Upper address can only be used with linear allocator and within single memory block.
    10249  if(isUpperAddress &&
    10250  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10251  {
    10252  return VK_ERROR_FEATURE_NOT_PRESENT;
    10253  }
    10254 
    10255  // Validate strategy.
    10256  switch(strategy)
    10257  {
    10258  case 0:
    10260  break;
    10264  break;
    10265  default:
    10266  return VK_ERROR_FEATURE_NOT_PRESENT;
    10267  }
    10268 
    10269  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10270  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10271  {
    10272  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10273  }
    10274 
    10275  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10276 
    10277  /*
    10278  Under certain condition, this whole section can be skipped for optimization, so
    10279  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10280  e.g. for custom pools with linear algorithm.
    10281  */
    10282  if(!canMakeOtherLost || canCreateNewBlock)
    10283  {
    10284  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10285  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10287 
    10288  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10289  {
    10290  // Use only last block.
    10291  if(!m_Blocks.empty())
    10292  {
    10293  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10294  VMA_ASSERT(pCurrBlock);
    10295  VkResult res = AllocateFromBlock(
    10296  pCurrBlock,
    10297  hCurrentPool,
    10298  currentFrameIndex,
    10299  size,
    10300  alignment,
    10301  allocFlagsCopy,
    10302  createInfo.pUserData,
    10303  suballocType,
    10304  strategy,
    10305  pAllocation);
    10306  if(res == VK_SUCCESS)
    10307  {
    10308  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10309  return VK_SUCCESS;
    10310  }
    10311  }
    10312  }
    10313  else
    10314  {
    10316  {
    10317  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10318  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10319  {
    10320  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10321  VMA_ASSERT(pCurrBlock);
    10322  VkResult res = AllocateFromBlock(
    10323  pCurrBlock,
    10324  hCurrentPool,
    10325  currentFrameIndex,
    10326  size,
    10327  alignment,
    10328  allocFlagsCopy,
    10329  createInfo.pUserData,
    10330  suballocType,
    10331  strategy,
    10332  pAllocation);
    10333  if(res == VK_SUCCESS)
    10334  {
    10335  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10336  return VK_SUCCESS;
    10337  }
    10338  }
    10339  }
    10340  else // WORST_FIT, FIRST_FIT
    10341  {
    10342  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10343  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10344  {
    10345  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10346  VMA_ASSERT(pCurrBlock);
    10347  VkResult res = AllocateFromBlock(
    10348  pCurrBlock,
    10349  hCurrentPool,
    10350  currentFrameIndex,
    10351  size,
    10352  alignment,
    10353  allocFlagsCopy,
    10354  createInfo.pUserData,
    10355  suballocType,
    10356  strategy,
    10357  pAllocation);
    10358  if(res == VK_SUCCESS)
    10359  {
    10360  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10361  return VK_SUCCESS;
    10362  }
    10363  }
    10364  }
    10365  }
    10366 
    10367  // 2. Try to create new block.
    10368  if(canCreateNewBlock)
    10369  {
    10370  // Calculate optimal size for new block.
    10371  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10372  uint32_t newBlockSizeShift = 0;
    10373  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10374 
    10375  if(!m_ExplicitBlockSize)
    10376  {
    10377  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10378  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10379  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10380  {
    10381  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10382  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10383  {
    10384  newBlockSize = smallerNewBlockSize;
    10385  ++newBlockSizeShift;
    10386  }
    10387  else
    10388  {
    10389  break;
    10390  }
    10391  }
    10392  }
    10393 
    10394  size_t newBlockIndex = 0;
    10395  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10396  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10397  if(!m_ExplicitBlockSize)
    10398  {
    10399  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10400  {
    10401  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10402  if(smallerNewBlockSize >= size)
    10403  {
    10404  newBlockSize = smallerNewBlockSize;
    10405  ++newBlockSizeShift;
    10406  res = CreateBlock(newBlockSize, &newBlockIndex);
    10407  }
    10408  else
    10409  {
    10410  break;
    10411  }
    10412  }
    10413  }
    10414 
    10415  if(res == VK_SUCCESS)
    10416  {
    10417  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10418  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10419 
    10420  res = AllocateFromBlock(
    10421  pBlock,
    10422  hCurrentPool,
    10423  currentFrameIndex,
    10424  size,
    10425  alignment,
    10426  allocFlagsCopy,
    10427  createInfo.pUserData,
    10428  suballocType,
    10429  strategy,
    10430  pAllocation);
    10431  if(res == VK_SUCCESS)
    10432  {
    10433  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10434  return VK_SUCCESS;
    10435  }
    10436  else
    10437  {
    10438  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10439  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10440  }
    10441  }
    10442  }
    10443  }
    10444 
    10445  // 3. Try to allocate from existing blocks with making other allocations lost.
    10446  if(canMakeOtherLost)
    10447  {
    10448  uint32_t tryIndex = 0;
    10449  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10450  {
    10451  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10452  VmaAllocationRequest bestRequest = {};
    10453  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10454 
    10455  // 1. Search existing allocations.
    10457  {
    10458  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10459  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10460  {
    10461  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10462  VMA_ASSERT(pCurrBlock);
    10463  VmaAllocationRequest currRequest = {};
    10464  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10465  currentFrameIndex,
    10466  m_FrameInUseCount,
    10467  m_BufferImageGranularity,
    10468  size,
    10469  alignment,
    10470  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10471  suballocType,
    10472  canMakeOtherLost,
    10473  strategy,
    10474  &currRequest))
    10475  {
    10476  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10477  if(pBestRequestBlock == VMA_NULL ||
    10478  currRequestCost < bestRequestCost)
    10479  {
    10480  pBestRequestBlock = pCurrBlock;
    10481  bestRequest = currRequest;
    10482  bestRequestCost = currRequestCost;
    10483 
    10484  if(bestRequestCost == 0)
    10485  {
    10486  break;
    10487  }
    10488  }
    10489  }
    10490  }
    10491  }
    10492  else // WORST_FIT, FIRST_FIT
    10493  {
    10494  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10495  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10496  {
    10497  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10498  VMA_ASSERT(pCurrBlock);
    10499  VmaAllocationRequest currRequest = {};
    10500  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10501  currentFrameIndex,
    10502  m_FrameInUseCount,
    10503  m_BufferImageGranularity,
    10504  size,
    10505  alignment,
    10506  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10507  suballocType,
    10508  canMakeOtherLost,
    10509  strategy,
    10510  &currRequest))
    10511  {
    10512  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10513  if(pBestRequestBlock == VMA_NULL ||
    10514  currRequestCost < bestRequestCost ||
    10516  {
    10517  pBestRequestBlock = pCurrBlock;
    10518  bestRequest = currRequest;
    10519  bestRequestCost = currRequestCost;
    10520 
    10521  if(bestRequestCost == 0 ||
    10523  {
    10524  break;
    10525  }
    10526  }
    10527  }
    10528  }
    10529  }
    10530 
    10531  if(pBestRequestBlock != VMA_NULL)
    10532  {
    10533  if(mapped)
    10534  {
    10535  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10536  if(res != VK_SUCCESS)
    10537  {
    10538  return res;
    10539  }
    10540  }
    10541 
    10542  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10543  currentFrameIndex,
    10544  m_FrameInUseCount,
    10545  &bestRequest))
    10546  {
    10547  // We no longer have an empty Allocation.
    10548  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10549  {
    10550  m_HasEmptyBlock = false;
    10551  }
    10552  // Allocate from this pBlock.
    10553  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10554  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10555  (*pAllocation)->InitBlockAllocation(
    10556  hCurrentPool,
    10557  pBestRequestBlock,
    10558  bestRequest.offset,
    10559  alignment,
    10560  size,
    10561  suballocType,
    10562  mapped,
    10563  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10564  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10565  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10566  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10567  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10568  {
    10569  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10570  }
    10571  if(IsCorruptionDetectionEnabled())
    10572  {
    10573  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10574  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10575  }
    10576  return VK_SUCCESS;
    10577  }
    10578  // else: Some allocations must have been touched while we are here. Next try.
    10579  }
    10580  else
    10581  {
    10582  // Could not find place in any of the blocks - break outer loop.
    10583  break;
    10584  }
    10585  }
    10586  /* Maximum number of tries exceeded - a very unlike event when many other
    10587  threads are simultaneously touching allocations making it impossible to make
    10588  lost at the same time as we try to allocate. */
    10589  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10590  {
    10591  return VK_ERROR_TOO_MANY_OBJECTS;
    10592  }
    10593  }
    10594 
    10595  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10596 }
    10597 
    10598 void VmaBlockVector::Free(
    10599  VmaAllocation hAllocation)
    10600 {
    10601  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10602 
    10603  // Scope for lock.
    10604  {
    10605  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10606 
    10607  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10608 
    10609  if(IsCorruptionDetectionEnabled())
    10610  {
    10611  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10612  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10613  }
    10614 
    10615  if(hAllocation->IsPersistentMap())
    10616  {
    10617  pBlock->Unmap(m_hAllocator, 1);
    10618  }
    10619 
    10620  pBlock->m_pMetadata->Free(hAllocation);
    10621  VMA_HEAVY_ASSERT(pBlock->Validate());
    10622 
    10623  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10624 
    10625  // pBlock became empty after this deallocation.
    10626  if(pBlock->m_pMetadata->IsEmpty())
    10627  {
    10628  // Already has empty Allocation. We don't want to have two, so delete this one.
    10629  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10630  {
    10631  pBlockToDelete = pBlock;
    10632  Remove(pBlock);
    10633  }
    10634  // We now have first empty block.
    10635  else
    10636  {
    10637  m_HasEmptyBlock = true;
    10638  }
    10639  }
    10640  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10641  // (This is optional, heuristics.)
    10642  else if(m_HasEmptyBlock)
    10643  {
    10644  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10645  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10646  {
    10647  pBlockToDelete = pLastBlock;
    10648  m_Blocks.pop_back();
    10649  m_HasEmptyBlock = false;
    10650  }
    10651  }
    10652 
    10653  IncrementallySortBlocks();
    10654  }
    10655 
    10656  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10657  // lock, for performance reason.
    10658  if(pBlockToDelete != VMA_NULL)
    10659  {
    10660  VMA_DEBUG_LOG(" Deleted empty allocation");
    10661  pBlockToDelete->Destroy(m_hAllocator);
    10662  vma_delete(m_hAllocator, pBlockToDelete);
    10663  }
    10664 }
    10665 
    10666 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10667 {
    10668  VkDeviceSize result = 0;
    10669  for(size_t i = m_Blocks.size(); i--; )
    10670  {
    10671  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10672  if(result >= m_PreferredBlockSize)
    10673  {
    10674  break;
    10675  }
    10676  }
    10677  return result;
    10678 }
    10679 
    10680 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10681 {
    10682  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10683  {
    10684  if(m_Blocks[blockIndex] == pBlock)
    10685  {
    10686  VmaVectorRemove(m_Blocks, blockIndex);
    10687  return;
    10688  }
    10689  }
    10690  VMA_ASSERT(0);
    10691 }
    10692 
    10693 void VmaBlockVector::IncrementallySortBlocks()
    10694 {
    10695  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10696  {
    10697  // Bubble sort only until first swap.
    10698  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10699  {
    10700  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10701  {
    10702  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10703  return;
    10704  }
    10705  }
    10706  }
    10707 }
    10708 
// Tries to allocate `size` bytes from the given block WITHOUT making any other
// allocation lost (the caller must have cleared CAN_MAKE_OTHER_LOST from
// allocFlags). On success fills *pAllocation and returns VK_SUCCESS;
// returns VK_ERROR_OUT_OF_DEVICE_MEMORY if the block has no suitable free
// region, or the Map() error when persistent mapping fails.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    // Ask the block's metadata for a region satisfying size/alignment.
    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Keep the block persistently mapped for the allocation's lifetime.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Create the allocation object and commit the region in the metadata.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optional debug features: fill pattern and corruption-detection margins.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10783 
    10784 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10785 {
    10786  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10787  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10788  allocInfo.allocationSize = blockSize;
    10789  VkDeviceMemory mem = VK_NULL_HANDLE;
    10790  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10791  if(res < 0)
    10792  {
    10793  return res;
    10794  }
    10795 
    10796  // New VkDeviceMemory successfully created.
    10797 
    10798  // Create new Allocation for it.
    10799  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10800  pBlock->Init(
    10801  m_hAllocator,
    10802  m_MemoryTypeIndex,
    10803  mem,
    10804  allocInfo.allocationSize,
    10805  m_NextBlockId++,
    10806  m_Algorithm);
    10807 
    10808  m_Blocks.push_back(pBlock);
    10809  if(pNewBlockIndex != VMA_NULL)
    10810  {
    10811  *pNewBlockIndex = m_Blocks.size() - 1;
    10812  }
    10813 
    10814  return VK_SUCCESS;
    10815 }
    10816 
    10817 #if VMA_STATS_STRING_ENABLED
    10818 
// Serializes this block vector's state as JSON. For a custom pool the full
// configuration is emitted (memory type, block size, block-count limits,
// frame-in-use count, algorithm); for a default vector only the preferred
// block size. Then every block's metadata is dumped keyed by its numeric id.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are emitted only when they constrain anything.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // 0 means the default algorithm - omitted from output.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        // Each block appears under its numeric id as the JSON key.
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10881 
    10882 #endif // #if VMA_STATS_STRING_ENABLED
    10883 
    10884 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10885  VmaAllocator hAllocator,
    10886  uint32_t currentFrameIndex)
    10887 {
    10888  if(m_pDefragmentator == VMA_NULL)
    10889  {
    10890  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10891  hAllocator,
    10892  this,
    10893  currentFrameIndex);
    10894  }
    10895 
    10896  return m_pDefragmentator;
    10897 }
    10898 
// Runs one defragmentation pass within the given move budgets.
// maxBytesToMove and maxAllocationsToMove are in-out: the amounts actually
// moved are subtracted from them. Afterwards, empty blocks above
// m_MinBlockCount are destroyed (their count/bytes reported in
// pDefragmentationStats when provided). No-op returning VK_SUCCESS if no
// defragmentator was created.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Shrink the remaining budget for subsequent block vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove doesn't shift
    // the indices still to be visited.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block to honor m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    10955 
    10956 void VmaBlockVector::DestroyDefragmentator()
    10957 {
    10958  if(m_pDefragmentator != VMA_NULL)
    10959  {
    10960  vma_delete(m_hAllocator, m_pDefragmentator);
    10961  m_pDefragmentator = VMA_NULL;
    10962  }
    10963 }
    10964 
    10965 void VmaBlockVector::MakePoolAllocationsLost(
    10966  uint32_t currentFrameIndex,
    10967  size_t* pLostAllocationCount)
    10968 {
    10969  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10970  size_t lostAllocationCount = 0;
    10971  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10972  {
    10973  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10974  VMA_ASSERT(pBlock);
    10975  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10976  }
    10977  if(pLostAllocationCount != VMA_NULL)
    10978  {
    10979  *pLostAllocationCount = lostAllocationCount;
    10980  }
    10981 }
    10982 
    10983 VkResult VmaBlockVector::CheckCorruption()
    10984 {
    10985  if(!IsCorruptionDetectionEnabled())
    10986  {
    10987  return VK_ERROR_FEATURE_NOT_PRESENT;
    10988  }
    10989 
    10990  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10991  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10992  {
    10993  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10994  VMA_ASSERT(pBlock);
    10995  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    10996  if(res != VK_SUCCESS)
    10997  {
    10998  return res;
    10999  }
    11000  }
    11001  return VK_SUCCESS;
    11002 }
    11003 
    11004 void VmaBlockVector::AddStats(VmaStats* pStats)
    11005 {
    11006  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11007  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11008 
    11009  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11010 
    11011  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11012  {
    11013  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11014  VMA_ASSERT(pBlock);
    11015  VMA_HEAVY_ASSERT(pBlock->Validate());
    11016  VmaStatInfo allocationStatInfo;
    11017  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11018  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11019  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11020  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11021  }
    11022 }
    11023 
    11025 // VmaDefragmentator members definition
    11026 
// Constructs a defragmentator bound to one block vector. Both containers use
// the allocator's callbacks so all internal memory goes through VMA's hooks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation is only supported for the default algorithm (0),
    // not for linear or buddy block vectors.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11041 
    11042 VmaDefragmentator::~VmaDefragmentator()
    11043 {
    11044  for(size_t i = m_Blocks.size(); i--; )
    11045  {
    11046  vma_delete(m_hAllocator, m_Blocks[i]);
    11047  }
    11048 }
    11049 
    11050 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11051 {
    11052  AllocationInfo allocInfo;
    11053  allocInfo.m_hAllocation = hAlloc;
    11054  allocInfo.m_pChanged = pChanged;
    11055  m_Allocations.push_back(allocInfo);
    11056 }
    11057 
    11058 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11059 {
    11060  // It has already been mapped for defragmentation.
    11061  if(m_pMappedDataForDefragmentation)
    11062  {
    11063  *ppMappedData = m_pMappedDataForDefragmentation;
    11064  return VK_SUCCESS;
    11065  }
    11066 
    11067  // It is originally mapped.
    11068  if(m_pBlock->GetMappedData())
    11069  {
    11070  *ppMappedData = m_pBlock->GetMappedData();
    11071  return VK_SUCCESS;
    11072  }
    11073 
    11074  // Map on first usage.
    11075  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11076  *ppMappedData = m_pMappedDataForDefragmentation;
    11077  return res;
    11078 }
    11079 
    11080 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11081 {
    11082  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11083  {
    11084  m_pBlock->Unmap(hAllocator, 1);
    11085  }
    11086 }
    11087 
/*
One pass of defragmentation: scans registered allocations from the most
"source"-like block to the most "destination"-like one and moves each
allocation to the earliest block/offset where it fits, respecting the
maxBytesToMove / maxAllocationsToMove budgets.
Returns VK_SUCCESS when the scan completes, VK_INCOMPLETE when a budget is
exhausted, or a mapping error.
Precondition: m_Blocks is sorted by BlockInfoCompareMoveDestination and each
block's m_Allocations is sorted largest-to-smallest (done in Defragment()).
*/
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // srcAllocIndex == SIZE_MAX means "not yet positioned inside this block";
    // the while loop below resolves it to the block's last allocation.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                // canMakeOtherLost was false, so nothing may need to be lost.
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Map both blocks (or reuse existing mappings) for the CPU copy.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                if(VMA_DEBUG_MARGIN > 0)
                {
                    // Re-create the guard magic values around the new location.
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit: register at destination, free at source, repoint the
                // allocation handle to its new block/offset.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the previous allocation / previous block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11231 
/*
Top-level defragmentation for this block vector:
1. Builds a BlockInfo per device memory block.
2. Distributes registered allocations (skipping lost ones) to their blocks.
3. Sorts blocks from most "destination"-like to most "source"-like and the
   allocations within each block from largest to smallest.
4. Runs up to 2 DefragmentRound passes within the given budgets.
5. Unmaps any blocks that were mapped only for the copy.
Caller is expected to hold the block vector's mutex.
*/
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    // Enables the binary search used below when routing allocations to blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of this
                // vector's blocks - reaching here indicates a logic error.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11299 
    11300 bool VmaDefragmentator::MoveMakesSense(
    11301  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11302  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11303 {
    11304  if(dstBlockIndex < srcBlockIndex)
    11305  {
    11306  return true;
    11307  }
    11308  if(dstBlockIndex > srcBlockIndex)
    11309  {
    11310  return false;
    11311  }
    11312  if(dstOffset < srcOffset)
    11313  {
    11314  return true;
    11315  }
    11316  return false;
    11317 }
    11318 
    11320 // VmaRecorder
    11321 
    11322 #if VMA_RECORDING_ENABLED
    11323 
// Constructs an inactive recorder. Real initialization (file opening, timer
// calibration) happens in Init(); until then m_File stays null.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11332 
// Opens the recording file and writes its header. Captures the performance
// counter frequency/start so subsequent entries carry relative timestamps.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
// Windows-only: uses QueryPerformanceCounter and fopen_s.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    // Second line is the file format version (major,minor) understood by VmaReplay.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,3");

    return VK_SUCCESS;
}
    11354 
// Closes the recording file if Init() succeeded.
VmaRecorder::~VmaRecorder()
{
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
    11362 
// Appends a CSV entry for vmaCreateAllocator.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    // Serialize file writes - recording may happen from multiple threads.
    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11372 
// Appends a CSV entry for vmaDestroyAllocator.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11382 
// Appends a CSV entry for vmaCreatePool with the full pool create info and
// the resulting pool handle (as a pointer value).
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11399 
// Appends a CSV entry for vmaDestroyPool.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11410 
// Appends a CSV entry for vmaAllocateMemory: memory requirements, allocation
// create info, the resulting allocation handle, and the user data string.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11435 
// Appends a CSV entry for vmaAllocateMemoryForBuffer, including the
// dedicated-allocation hints queried from the buffer.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11464 
// Appends a CSV entry for vmaAllocateMemoryForImage, including the
// dedicated-allocation hints queried from the image.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11493 
// Appends a CSV entry for vmaFreeMemory.
void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11505 
// Appends a CSV entry for vmaSetAllocationUserData. User data is serialized
// either as a string (if the allocation stores a copied string) or as a
// raw pointer value.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11522 
// Appends a CSV entry for vmaCreateLostAllocation.
void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11534 
// Appends a CSV entry for vmaMapMemory.
void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11546 
// Appends a CSV entry for vmaUnmapMemory.
void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11558 
// Appends a CSV entry for vmaFlushAllocation with the flushed range.
void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11572 
// Appends a CSV entry for vmaInvalidateAllocation with the invalidated range.
void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11586 
// Appends a CSV entry for vmaCreateBuffer: buffer create info, allocation
// create info, resulting allocation handle, and user data string.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11612 
// Appends a CSV entry for vmaCreateImage: the full image create info,
// allocation create info, resulting allocation handle, and user data string.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11647 
// Appends a CSV entry for vmaDestroyBuffer.
void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11659 
// Appends a CSV entry for vmaDestroyImage.
void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11671 
// Appends a CSV entry for vmaTouchAllocation.
void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11683 
// Appends a CSV entry for vmaGetAllocationInfo.
void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11695 
// Appends a CSV entry for vmaMakePoolAllocationsLost.
void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11707 
    11708 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11709 {
    11710  if(pUserData != VMA_NULL)
    11711  {
    11712  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11713  {
    11714  m_Str = (const char*)pUserData;
    11715  }
    11716  else
    11717  {
    11718  sprintf_s(m_PtrStr, "%p", pUserData);
    11719  m_Str = m_PtrStr;
    11720  }
    11721  }
    11722  else
    11723  {
    11724  m_Str = "";
    11725  }
    11726 }
    11727 
// Writes the "Config" section of the recording: physical device properties,
// memory heaps/types, enabled extensions and the VMA compile-time macros,
// so a replay can detect environment differences.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Dump every memory heap and memory type so replay can map indices.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11773 
    11774 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11775 {
    11776  outParams.threadId = GetCurrentThreadId();
    11777 
    11778  LARGE_INTEGER counter;
    11779  QueryPerformanceCounter(&counter);
    11780  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11781 }
    11782 
    11783 void VmaRecorder::Flush()
    11784 {
    11785  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11786  {
    11787  fflush(m_File);
    11788  }
    11789 }
    11790 
    11791 #endif // #if VMA_RECORDING_ENABLED
    11792 
    11794 // VmaAllocator_T
    11795 
    11796 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    11797  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    11798  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    11799  m_hDevice(pCreateInfo->device),
    11800  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    11801  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    11802  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    11803  m_PreferredLargeHeapBlockSize(0),
    11804  m_PhysicalDevice(pCreateInfo->physicalDevice),
    11805  m_CurrentFrameIndex(0),
    11806  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    11807  m_NextPoolId(0)
    11809  ,m_pRecorder(VMA_NULL)
    11810 #endif
    11811 {
    11812  if(VMA_DEBUG_DETECT_CORRUPTION)
    11813  {
    11814  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    11815  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    11816  }
    11817 
    11818  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    11819 
    11820 #if !(VMA_DEDICATED_ALLOCATION)
    11822  {
    11823  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    11824  }
    11825 #endif
    11826 
    11827  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    11828  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    11829  memset(&m_MemProps, 0, sizeof(m_MemProps));
    11830 
    11831  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    11832  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    11833 
    11834  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    11835  {
    11836  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    11837  }
    11838 
    11839  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    11840  {
    11841  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    11842  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    11843  }
    11844 
    11845  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    11846 
    11847  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    11848  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    11849 
    11850  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    11851  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    11852  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    11853  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    11854 
    11855  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    11856  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11857 
    11858  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    11859  {
    11860  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    11861  {
    11862  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    11863  if(limit != VK_WHOLE_SIZE)
    11864  {
    11865  m_HeapSizeLimit[heapIndex] = limit;
    11866  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    11867  {
    11868  m_MemProps.memoryHeaps[heapIndex].size = limit;
    11869  }
    11870  }
    11871  }
    11872  }
    11873 
    11874  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    11875  {
    11876  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    11877 
    11878  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    11879  this,
    11880  memTypeIndex,
    11881  preferredBlockSize,
    11882  0,
    11883  SIZE_MAX,
    11884  GetBufferImageGranularity(),
    11885  pCreateInfo->frameInUseCount,
    11886  false, // isCustomPool
    11887  false, // explicitBlockSize
    11888  false); // linearAlgorithm
    11889  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    11890  // becase minBlockCount is 0.
    11891  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    11892 
    11893  }
    11894 }
    11895 
    11896 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    11897 {
    11898  VkResult res = VK_SUCCESS;
    11899 
    11900  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    11901  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    11902  {
    11903 #if VMA_RECORDING_ENABLED
    11904  m_pRecorder = vma_new(this, VmaRecorder)();
    11905  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    11906  if(res != VK_SUCCESS)
    11907  {
    11908  return res;
    11909  }
    11910  m_pRecorder->WriteConfiguration(
    11911  m_PhysicalDeviceProperties,
    11912  m_MemProps,
    11913  m_UseKhrDedicatedAllocation);
    11914  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    11915 #else
    11916  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    11917  return VK_ERROR_FEATURE_NOT_PRESENT;
    11918 #endif
    11919  }
    11920 
    11921  return res;
    11922 }
    11923 
    11924 VmaAllocator_T::~VmaAllocator_T()
    11925 {
    11926 #if VMA_RECORDING_ENABLED
    11927  if(m_pRecorder != VMA_NULL)
    11928  {
    11929  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    11930  vma_delete(this, m_pRecorder);
    11931  }
    11932 #endif
    11933 
    11934  VMA_ASSERT(m_Pools.empty());
    11935 
    11936  for(size_t i = GetMemoryTypeCount(); i--; )
    11937  {
    11938  vma_delete(this, m_pDedicatedAllocations[i]);
    11939  vma_delete(this, m_pBlockVectors[i]);
    11940  }
    11941 }
    11942 
// Populates m_VulkanFunctions, in two layers:
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, take addresses of the statically
//    linked Vulkan entry points (plus vkGetDeviceProcAddr lookups for the
//    KHR dedicated-allocation pair, which has no static symbol guarantee).
// 2. Overwrite any entry for which the user supplied a non-null pointer in
//    pVulkanFunctions, so user-provided pointers always win.
// Finally asserts that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // The *2KHR functions are extension entry points, fetched dynamically
    // only when the extension was enabled at allocator creation.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Helper: copy a single user-supplied function pointer if it is non-null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12028 
    12029 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12030 {
    12031  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12032  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12033  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12034  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12035 }
    12036 
    12037 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12038  VkDeviceSize size,
    12039  VkDeviceSize alignment,
    12040  bool dedicatedAllocation,
    12041  VkBuffer dedicatedBuffer,
    12042  VkImage dedicatedImage,
    12043  const VmaAllocationCreateInfo& createInfo,
    12044  uint32_t memTypeIndex,
    12045  VmaSuballocationType suballocType,
    12046  VmaAllocation* pAllocation)
    12047 {
    12048  VMA_ASSERT(pAllocation != VMA_NULL);
    12049  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12050 
    12051  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12052 
    12053  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12054  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12055  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12056  {
    12057  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12058  }
    12059 
    12060  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12061  VMA_ASSERT(blockVector);
    12062 
    12063  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12064  bool preferDedicatedMemory =
    12065  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12066  dedicatedAllocation ||
    12067  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12068  size > preferredBlockSize / 2;
    12069 
    12070  if(preferDedicatedMemory &&
    12071  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12072  finalCreateInfo.pool == VK_NULL_HANDLE)
    12073  {
    12075  }
    12076 
    12077  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12078  {
    12079  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12080  {
    12081  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12082  }
    12083  else
    12084  {
    12085  return AllocateDedicatedMemory(
    12086  size,
    12087  suballocType,
    12088  memTypeIndex,
    12089  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12090  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12091  finalCreateInfo.pUserData,
    12092  dedicatedBuffer,
    12093  dedicatedImage,
    12094  pAllocation);
    12095  }
    12096  }
    12097  else
    12098  {
    12099  VkResult res = blockVector->Allocate(
    12100  VK_NULL_HANDLE, // hCurrentPool
    12101  m_CurrentFrameIndex.load(),
    12102  size,
    12103  alignment,
    12104  finalCreateInfo,
    12105  suballocType,
    12106  pAllocation);
    12107  if(res == VK_SUCCESS)
    12108  {
    12109  return res;
    12110  }
    12111 
    12112  // 5. Try dedicated memory.
    12113  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12114  {
    12115  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12116  }
    12117  else
    12118  {
    12119  res = AllocateDedicatedMemory(
    12120  size,
    12121  suballocType,
    12122  memTypeIndex,
    12123  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12124  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12125  finalCreateInfo.pUserData,
    12126  dedicatedBuffer,
    12127  dedicatedImage,
    12128  pAllocation);
    12129  if(res == VK_SUCCESS)
    12130  {
    12131  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12132  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12133  return VK_SUCCESS;
    12134  }
    12135  else
    12136  {
    12137  // Everything failed: Return error code.
    12138  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12139  return res;
    12140  }
    12141  }
    12142  }
    12143 }
    12144 
// Allocates one VkDeviceMemory object dedicated to a single allocation.
// Steps, in strict order: build VkMemoryAllocateInfo (optionally chaining
// VkMemoryDedicatedAllocateInfoKHR), allocate, optionally persistently map,
// wrap in a VmaAllocation_T, and register it in m_pDedicatedAllocations.
// On mapping failure the freshly allocated memory is released before
// returning the error, so no resource leaks.
//
// map:              whether to vkMapMemory immediately (persistent mapping).
// isUserDataString: whether pUserData is a string to be copied.
// dedicatedBuffer/dedicatedImage: at most one may be non-null; it is passed
//                   to the KHR dedicated-allocation extension when enabled.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain the dedicated-allocation info only when the extension is in use
    // and a buffer or image to dedicate to was supplied.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            // Buffer and image are mutually exclusive here.
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Persistent mapping requested: map now; on failure, free the memory
    // we just allocated before propagating the error.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12227 
    12228 void VmaAllocator_T::GetBufferMemoryRequirements(
    12229  VkBuffer hBuffer,
    12230  VkMemoryRequirements& memReq,
    12231  bool& requiresDedicatedAllocation,
    12232  bool& prefersDedicatedAllocation) const
    12233 {
    12234 #if VMA_DEDICATED_ALLOCATION
    12235  if(m_UseKhrDedicatedAllocation)
    12236  {
    12237  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12238  memReqInfo.buffer = hBuffer;
    12239 
    12240  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12241 
    12242  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12243  memReq2.pNext = &memDedicatedReq;
    12244 
    12245  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12246 
    12247  memReq = memReq2.memoryRequirements;
    12248  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12249  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12250  }
    12251  else
    12252 #endif // #if VMA_DEDICATED_ALLOCATION
    12253  {
    12254  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12255  requiresDedicatedAllocation = false;
    12256  prefersDedicatedAllocation = false;
    12257  }
    12258 }
    12259 
    12260 void VmaAllocator_T::GetImageMemoryRequirements(
    12261  VkImage hImage,
    12262  VkMemoryRequirements& memReq,
    12263  bool& requiresDedicatedAllocation,
    12264  bool& prefersDedicatedAllocation) const
    12265 {
    12266 #if VMA_DEDICATED_ALLOCATION
    12267  if(m_UseKhrDedicatedAllocation)
    12268  {
    12269  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12270  memReqInfo.image = hImage;
    12271 
    12272  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12273 
    12274  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12275  memReq2.pNext = &memDedicatedReq;
    12276 
    12277  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12278 
    12279  memReq = memReq2.memoryRequirements;
    12280  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12281  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12282  }
    12283  else
    12284 #endif // #if VMA_DEDICATED_ALLOCATION
    12285  {
    12286  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12287  requiresDedicatedAllocation = false;
    12288  prefersDedicatedAllocation = false;
    12289  }
    12290 }
    12291 
    12292 VkResult VmaAllocator_T::AllocateMemory(
    12293  const VkMemoryRequirements& vkMemReq,
    12294  bool requiresDedicatedAllocation,
    12295  bool prefersDedicatedAllocation,
    12296  VkBuffer dedicatedBuffer,
    12297  VkImage dedicatedImage,
    12298  const VmaAllocationCreateInfo& createInfo,
    12299  VmaSuballocationType suballocType,
    12300  VmaAllocation* pAllocation)
    12301 {
    12302  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12303 
    12304  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12305  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12306  {
    12307  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12308  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12309  }
    12310  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12312  {
    12313  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12314  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12315  }
    12316  if(requiresDedicatedAllocation)
    12317  {
    12318  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12319  {
    12320  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12321  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12322  }
    12323  if(createInfo.pool != VK_NULL_HANDLE)
    12324  {
    12325  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12326  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12327  }
    12328  }
    12329  if((createInfo.pool != VK_NULL_HANDLE) &&
    12330  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12331  {
    12332  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12333  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12334  }
    12335 
    12336  if(createInfo.pool != VK_NULL_HANDLE)
    12337  {
    12338  const VkDeviceSize alignmentForPool = VMA_MAX(
    12339  vkMemReq.alignment,
    12340  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12341  return createInfo.pool->m_BlockVector.Allocate(
    12342  createInfo.pool,
    12343  m_CurrentFrameIndex.load(),
    12344  vkMemReq.size,
    12345  alignmentForPool,
    12346  createInfo,
    12347  suballocType,
    12348  pAllocation);
    12349  }
    12350  else
    12351  {
    12352  // Bit mask of memory Vulkan types acceptable for this allocation.
    12353  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12354  uint32_t memTypeIndex = UINT32_MAX;
    12355  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12356  if(res == VK_SUCCESS)
    12357  {
    12358  VkDeviceSize alignmentForMemType = VMA_MAX(
    12359  vkMemReq.alignment,
    12360  GetMemoryTypeMinAlignment(memTypeIndex));
    12361 
    12362  res = AllocateMemoryOfType(
    12363  vkMemReq.size,
    12364  alignmentForMemType,
    12365  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12366  dedicatedBuffer,
    12367  dedicatedImage,
    12368  createInfo,
    12369  memTypeIndex,
    12370  suballocType,
    12371  pAllocation);
    12372  // Succeeded on first try.
    12373  if(res == VK_SUCCESS)
    12374  {
    12375  return res;
    12376  }
    12377  // Allocation from this memory type failed. Try other compatible memory types.
    12378  else
    12379  {
    12380  for(;;)
    12381  {
    12382  // Remove old memTypeIndex from list of possibilities.
    12383  memoryTypeBits &= ~(1u << memTypeIndex);
    12384  // Find alternative memTypeIndex.
    12385  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12386  if(res == VK_SUCCESS)
    12387  {
    12388  alignmentForMemType = VMA_MAX(
    12389  vkMemReq.alignment,
    12390  GetMemoryTypeMinAlignment(memTypeIndex));
    12391 
    12392  res = AllocateMemoryOfType(
    12393  vkMemReq.size,
    12394  alignmentForMemType,
    12395  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12396  dedicatedBuffer,
    12397  dedicatedImage,
    12398  createInfo,
    12399  memTypeIndex,
    12400  suballocType,
    12401  pAllocation);
    12402  // Allocation from this alternative memory type succeeded.
    12403  if(res == VK_SUCCESS)
    12404  {
    12405  return res;
    12406  }
    12407  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12408  }
    12409  // No other matching memory type index could be found.
    12410  else
    12411  {
    12412  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12413  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12414  }
    12415  }
    12416  }
    12417  }
    12418  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12419  else
    12420  return res;
    12421  }
    12422 }
    12423 
    12424 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12425 {
    12426  VMA_ASSERT(allocation);
    12427 
    12428  if(TouchAllocation(allocation))
    12429  {
    12430  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12431  {
    12432  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12433  }
    12434 
    12435  switch(allocation->GetType())
    12436  {
    12437  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12438  {
    12439  VmaBlockVector* pBlockVector = VMA_NULL;
    12440  VmaPool hPool = allocation->GetPool();
    12441  if(hPool != VK_NULL_HANDLE)
    12442  {
    12443  pBlockVector = &hPool->m_BlockVector;
    12444  }
    12445  else
    12446  {
    12447  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12448  pBlockVector = m_pBlockVectors[memTypeIndex];
    12449  }
    12450  pBlockVector->Free(allocation);
    12451  }
    12452  break;
    12453  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12454  FreeDedicatedMemory(allocation);
    12455  break;
    12456  default:
    12457  VMA_ASSERT(0);
    12458  }
    12459  }
    12460 
    12461  allocation->SetUserData(this, VMA_NULL);
    12462  vma_delete(this, allocation);
    12463 }
    12464 
    12465 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12466 {
    12467  // Initialize.
    12468  InitStatInfo(pStats->total);
    12469  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12470  InitStatInfo(pStats->memoryType[i]);
    12471  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12472  InitStatInfo(pStats->memoryHeap[i]);
    12473 
    12474  // Process default pools.
    12475  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12476  {
    12477  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12478  VMA_ASSERT(pBlockVector);
    12479  pBlockVector->AddStats(pStats);
    12480  }
    12481 
    12482  // Process custom pools.
    12483  {
    12484  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12485  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12486  {
    12487  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12488  }
    12489  }
    12490 
    12491  // Process dedicated allocations.
    12492  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12493  {
    12494  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12495  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12496  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12497  VMA_ASSERT(pDedicatedAllocVector);
    12498  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12499  {
    12500  VmaStatInfo allocationStatInfo;
    12501  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12502  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12503  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12504  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12505  }
    12506  }
    12507 
    12508  // Postprocess.
    12509  VmaPostprocessCalcStatInfo(pStats->total);
    12510  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12511  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12512  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12513  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12514 }
    12515 
    12516 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12517 
    12518 VkResult VmaAllocator_T::Defragment(
    12519  VmaAllocation* pAllocations,
    12520  size_t allocationCount,
    12521  VkBool32* pAllocationsChanged,
    12522  const VmaDefragmentationInfo* pDefragmentationInfo,
    12523  VmaDefragmentationStats* pDefragmentationStats)
    12524 {
    12525  if(pAllocationsChanged != VMA_NULL)
    12526  {
    12527  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    12528  }
    12529  if(pDefragmentationStats != VMA_NULL)
    12530  {
    12531  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12532  }
    12533 
    12534  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12535 
    12536  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12537 
    12538  const size_t poolCount = m_Pools.size();
    12539 
    12540  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12541  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12542  {
    12543  VmaAllocation hAlloc = pAllocations[allocIndex];
    12544  VMA_ASSERT(hAlloc);
    12545  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12546  // DedicatedAlloc cannot be defragmented.
    12547  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12548  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12549  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12550  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12551  // Lost allocation cannot be defragmented.
    12552  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12553  {
    12554  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12555 
    12556  const VmaPool hAllocPool = hAlloc->GetPool();
    12557  // This allocation belongs to custom pool.
    12558  if(hAllocPool != VK_NULL_HANDLE)
    12559  {
    12560  // Pools with linear or buddy algorithm are not defragmented.
    12561  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12562  {
    12563  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12564  }
    12565  }
    12566  // This allocation belongs to general pool.
    12567  else
    12568  {
    12569  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12570  }
    12571 
    12572  if(pAllocBlockVector != VMA_NULL)
    12573  {
    12574  VmaDefragmentator* const pDefragmentator =
    12575  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12576  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12577  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12578  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12579  }
    12580  }
    12581  }
    12582 
    12583  VkResult result = VK_SUCCESS;
    12584 
    12585  // ======== Main processing.
    12586 
    12587  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12588  uint32_t maxAllocationsToMove = UINT32_MAX;
    12589  if(pDefragmentationInfo != VMA_NULL)
    12590  {
    12591  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12592  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12593  }
    12594 
    12595  // Process standard memory.
    12596  for(uint32_t memTypeIndex = 0;
    12597  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12598  ++memTypeIndex)
    12599  {
    12600  // Only HOST_VISIBLE memory types can be defragmented.
    12601  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12602  {
    12603  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12604  pDefragmentationStats,
    12605  maxBytesToMove,
    12606  maxAllocationsToMove);
    12607  }
    12608  }
    12609 
    12610  // Process custom pools.
    12611  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12612  {
    12613  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12614  pDefragmentationStats,
    12615  maxBytesToMove,
    12616  maxAllocationsToMove);
    12617  }
    12618 
    12619  // ======== Destroy defragmentators.
    12620 
    12621  // Process custom pools.
    12622  for(size_t poolIndex = poolCount; poolIndex--; )
    12623  {
    12624  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12625  }
    12626 
    12627  // Process standard memory.
    12628  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12629  {
    12630  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12631  {
    12632  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12633  }
    12634  }
    12635 
    12636  return result;
    12637 }
    12638 
// Fills *pAllocationInfo with the current parameters of hAllocation.
// For allocations that can become lost this also acts as a "touch": it
// advances the allocation's last-use frame index to the current frame with a
// compare-exchange loop, racing correctly against concurrent touches and
// against the allocation being marked lost.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report placeholder values (no memory backing).
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched in the current frame: report real parameters.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump the last-use index to the current frame, then loop
                // again to re-evaluate (CAS may fail if another thread raced us).
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // With stats strings enabled, keep the last-use frame index up to date
        // even for allocations that cannot become lost, so statistics output
        // remains meaningful. A non-lostable allocation must never be LOST.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12710 
// "Touches" the allocation: advances its last-use frame index to the current
// frame so it will not be claimed as lost during this frame.
// Returns false if the allocation is already lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Attempt to bump the last-use index to the current frame and
                // re-evaluate. NOTE(review): on CAS failure, localLastUseFrameIndex
                // is presumably refreshed by CompareExchangeLastUseFrameIndex
                // (compare_exchange semantics) - confirm against its definition.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Keep last-use frame index current for statistics purposes only;
        // a non-lostable allocation can never actually be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    12762 
    12763 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    12764 {
    12765  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    12766 
    12767  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    12768 
    12769  if(newCreateInfo.maxBlockCount == 0)
    12770  {
    12771  newCreateInfo.maxBlockCount = SIZE_MAX;
    12772  }
    12773  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    12774  {
    12775  return VK_ERROR_INITIALIZATION_FAILED;
    12776  }
    12777 
    12778  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    12779 
    12780  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    12781 
    12782  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    12783  if(res != VK_SUCCESS)
    12784  {
    12785  vma_delete(this, *pPool);
    12786  *pPool = VMA_NULL;
    12787  return res;
    12788  }
    12789 
    12790  // Add to m_Pools.
    12791  {
    12792  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12793  (*pPool)->SetId(m_NextPoolId++);
    12794  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    12795  }
    12796 
    12797  return VK_SUCCESS;
    12798 }
    12799 
    12800 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12801 {
    12802  // Remove from m_Pools.
    12803  {
    12804  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12805  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    12806  VMA_ASSERT(success && "Pool not found in Allocator.");
    12807  }
    12808 
    12809  vma_delete(this, pool);
    12810 }
    12811 
// Retrieves statistics of a single custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    12816 
// Atomically publishes the new current frame index, which the
// lost-allocation logic (GetAllocationInfo/TouchAllocation) reads.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    12821 
// Delegates to the pool's block vector to mark its allocations as lost,
// passing the current frame index.
// pLostAllocationCount (optional) receives the number of allocations marked lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    12830 
// Runs the corruption check on a single custom pool's block vector and
// returns its result unchanged.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    12835 
    12836 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12837 {
    12838  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12839 
    12840  // Process default pools.
    12841  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12842  {
    12843  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12844  {
    12845  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12846  VMA_ASSERT(pBlockVector);
    12847  VkResult localRes = pBlockVector->CheckCorruption();
    12848  switch(localRes)
    12849  {
    12850  case VK_ERROR_FEATURE_NOT_PRESENT:
    12851  break;
    12852  case VK_SUCCESS:
    12853  finalRes = VK_SUCCESS;
    12854  break;
    12855  default:
    12856  return localRes;
    12857  }
    12858  }
    12859  }
    12860 
    12861  // Process custom pools.
    12862  {
    12863  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12864  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12865  {
    12866  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12867  {
    12868  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12869  switch(localRes)
    12870  {
    12871  case VK_ERROR_FEATURE_NOT_PRESENT:
    12872  break;
    12873  case VK_SUCCESS:
    12874  finalRes = VK_SUCCESS;
    12875  break;
    12876  default:
    12877  return localRes;
    12878  }
    12879  }
    12880  }
    12881  }
    12882 
    12883  return finalRes;
    12884 }
    12885 
// Creates a dummy allocation object that is already in the "lost" state
// (frame index VMA_FRAME_INDEX_LOST). NOTE(review): the second constructor
// argument is `false` - presumably the user-data-is-string flag; confirm
// against the VmaAllocation_T constructor.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    12891 
    12892 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    12893 {
    12894  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    12895 
    12896  VkResult res;
    12897  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12898  {
    12899  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12900  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    12901  {
    12902  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12903  if(res == VK_SUCCESS)
    12904  {
    12905  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    12906  }
    12907  }
    12908  else
    12909  {
    12910  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12911  }
    12912  }
    12913  else
    12914  {
    12915  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12916  }
    12917 
    12918  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    12919  {
    12920  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    12921  }
    12922 
    12923  return res;
    12924 }
    12925 
// Frees device memory previously obtained from AllocateVulkanMemory.
// The user's pfnFree callback runs first, while the handle is still valid;
// afterwards, if a heap size limit is configured for this memory type's heap,
// the freed size is returned to the budget under m_HeapSizeLimitMutex.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // VK_WHOLE_SIZE marks "no limit" for the heap; only track real budgets.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    12942 
    12943 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    12944 {
    12945  if(hAllocation->CanBecomeLost())
    12946  {
    12947  return VK_ERROR_MEMORY_MAP_FAILED;
    12948  }
    12949 
    12950  switch(hAllocation->GetType())
    12951  {
    12952  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12953  {
    12954  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12955  char *pBytes = VMA_NULL;
    12956  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    12957  if(res == VK_SUCCESS)
    12958  {
    12959  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    12960  hAllocation->BlockAllocMap();
    12961  }
    12962  return res;
    12963  }
    12964  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12965  return hAllocation->DedicatedAllocMap(this, ppData);
    12966  default:
    12967  VMA_ASSERT(0);
    12968  return VK_ERROR_MEMORY_MAP_FAILED;
    12969  }
    12970 }
    12971 
    12972 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    12973 {
    12974  switch(hAllocation->GetType())
    12975  {
    12976  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12977  {
    12978  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12979  hAllocation->BlockAllocUnmap();
    12980  pBlock->Unmap(this, 1);
    12981  }
    12982  break;
    12983  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12984  hAllocation->DedicatedAllocUnmap(this);
    12985  break;
    12986  default:
    12987  VMA_ASSERT(0);
    12988  }
    12989 }
    12990 
    12991 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    12992 {
    12993  VkResult res = VK_SUCCESS;
    12994  switch(hAllocation->GetType())
    12995  {
    12996  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12997  res = GetVulkanFunctions().vkBindBufferMemory(
    12998  m_hDevice,
    12999  hBuffer,
    13000  hAllocation->GetMemory(),
    13001  0); //memoryOffset
    13002  break;
    13003  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13004  {
    13005  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13006  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13007  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13008  break;
    13009  }
    13010  default:
    13011  VMA_ASSERT(0);
    13012  }
    13013  return res;
    13014 }
    13015 
    13016 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13017 {
    13018  VkResult res = VK_SUCCESS;
    13019  switch(hAllocation->GetType())
    13020  {
    13021  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13022  res = GetVulkanFunctions().vkBindImageMemory(
    13023  m_hDevice,
    13024  hImage,
    13025  hAllocation->GetMemory(),
    13026  0); //memoryOffset
    13027  break;
    13028  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13029  {
    13030  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13031  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13032  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13033  break;
    13034  }
    13035  default:
    13036  VMA_ASSERT(0);
    13037  }
    13038  return res;
    13039 }
    13040 
// Flushes or invalidates (per `op`) the byte range [offset, offset+size) of
// the allocation's memory. size == VK_WHOLE_SIZE means "to the end of the
// allocation". The call is a no-op when size == 0 or when the memory type is
// coherent (no explicit flush/invalidate needed then).
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        // Mapped memory ranges must be aligned to nonCoherentAtomSize.
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align offset down and size up, clamped to the allocation's end.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Translate from allocation-relative to block-relative offset and
            // clamp the (aligned-up) size so it never runs past the block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13116 
    13117 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13118 {
    13119  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13120 
    13121  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13122  {
    13123  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13124  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13125  VMA_ASSERT(pDedicatedAllocations);
    13126  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13127  VMA_ASSERT(success);
    13128  }
    13129 
    13130  VkDeviceMemory hMemory = allocation->GetMemory();
    13131 
    13132  /*
    13133  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13134  before vkFreeMemory.
    13135 
    13136  if(allocation->GetMappedData() != VMA_NULL)
    13137  {
    13138  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13139  }
    13140  */
    13141 
    13142  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13143 
    13144  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13145 }
    13146 
    13147 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13148 {
    13149  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13150  !hAllocation->CanBecomeLost() &&
    13151  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13152  {
    13153  void* pData = VMA_NULL;
    13154  VkResult res = Map(hAllocation, &pData);
    13155  if(res == VK_SUCCESS)
    13156  {
    13157  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13158  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13159  Unmap(hAllocation);
    13160  }
    13161  else
    13162  {
    13163  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13164  }
    13165  }
    13166 }
    13167 
    13168 #if VMA_STATS_STRING_ENABLED
    13169 
// Writes the detailed allocation map into the given JSON writer, in three
// sections: "DedicatedAllocations", "DefaultPools", and "Pools" (custom).
// Each section object is opened lazily, only if it has any content.
// The caller provides the surrounding JSON object.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Dedicated allocations, grouped by memory type ("Type N" arrays).
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default pools: one block vector per memory type; skip empty ones.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools, keyed by pool id, under the pools mutex.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13255 
    13256 #endif // #if VMA_STATS_STRING_ENABLED
    13257 
    13259 // Public interface
    13260 
// Public entry point: allocates the VmaAllocator_T object using the
// user-provided allocation callbacks and initializes it.
// NOTE(review): if Init() fails, *pAllocator still holds the partially
// initialized object - presumably the caller is expected to pass it to
// vmaDestroyAllocator; confirm against the library documentation.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13270 
    13271 void vmaDestroyAllocator(
    13272  VmaAllocator allocator)
    13273 {
    13274  if(allocator != VK_NULL_HANDLE)
    13275  {
    13276  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13277  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13278  vma_delete(&allocationCallbacks, allocator);
    13279  }
    13280 }
    13281 
    13283  VmaAllocator allocator,
    13284  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13285 {
    13286  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13287  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13288 }
    13289 
    13291  VmaAllocator allocator,
    13292  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13293 {
    13294  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13295  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13296 }
    13297 
    13299  VmaAllocator allocator,
    13300  uint32_t memoryTypeIndex,
    13301  VkMemoryPropertyFlags* pFlags)
    13302 {
    13303  VMA_ASSERT(allocator && pFlags);
    13304  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13305  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13306 }
    13307 
    13309  VmaAllocator allocator,
    13310  uint32_t frameIndex)
    13311 {
    13312  VMA_ASSERT(allocator);
    13313  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13314 
    13315  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13316 
    13317  allocator->SetCurrentFrameIndex(frameIndex);
    13318 }
    13319 
// Public entry point: computes allocator-wide statistics into *pStats.
// (VMA_DEBUG_GLOBAL_MUTEX_LOCK is a statement-like macro - no semicolon.)
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13328 
    13329 #if VMA_STATS_STRING_ENABLED
    13330 
// Builds a JSON string describing the allocator's current state: total stats,
// then per-heap objects (size, flags, stats) with nested per-memory-type
// objects, and - when detailedMap is VK_TRUE - the detailed allocation map.
// The returned string is allocated with the allocator's callbacks and must be
// released with vmaFreeStatsString.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One JSON object per memory heap.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap stats only if the heap contains any blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nested objects for memory types belonging to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built string into a caller-owned, NUL-terminated buffer
    // allocated through the allocator's callbacks.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13438 
    13439 void vmaFreeStatsString(
    13440  VmaAllocator allocator,
    13441  char* pStatsString)
    13442 {
    13443  if(pStatsString != VMA_NULL)
    13444  {
    13445  VMA_ASSERT(allocator);
    13446  size_t len = strlen(pStatsString);
    13447  vma_delete_array(allocator, pStatsString, len + 1);
    13448  }
    13449 }
    13450 
    13451 #endif // #if VMA_STATS_STRING_ENABLED
    13452 
    13453 /*
    13454 This function is not protected by any mutex because it just reads immutable data.
    13455 */
    13456 VkResult vmaFindMemoryTypeIndex(
    13457  VmaAllocator allocator,
    13458  uint32_t memoryTypeBits,
    13459  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13460  uint32_t* pMemoryTypeIndex)
    13461 {
    13462  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13463  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13464  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13465 
    13466  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13467  {
    13468  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13469  }
    13470 
    13471  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13472  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13473 
    13474  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13475  if(mapped)
    13476  {
    13477  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13478  }
    13479 
    13480  // Convert usage to requiredFlags and preferredFlags.
    13481  switch(pAllocationCreateInfo->usage)
    13482  {
    13484  break;
    13486  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13487  {
    13488  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13489  }
    13490  break;
    13492  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13493  break;
    13495  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13496  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13497  {
    13498  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13499  }
    13500  break;
    13502  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13503  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13504  break;
    13505  default:
    13506  break;
    13507  }
    13508 
    13509  *pMemoryTypeIndex = UINT32_MAX;
    13510  uint32_t minCost = UINT32_MAX;
    13511  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13512  memTypeIndex < allocator->GetMemoryTypeCount();
    13513  ++memTypeIndex, memTypeBit <<= 1)
    13514  {
    13515  // This memory type is acceptable according to memoryTypeBits bitmask.
    13516  if((memTypeBit & memoryTypeBits) != 0)
    13517  {
    13518  const VkMemoryPropertyFlags currFlags =
    13519  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13520  // This memory type contains requiredFlags.
    13521  if((requiredFlags & ~currFlags) == 0)
    13522  {
    13523  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13524  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13525  // Remember memory type with lowest cost.
    13526  if(currCost < minCost)
    13527  {
    13528  *pMemoryTypeIndex = memTypeIndex;
    13529  if(currCost == 0)
    13530  {
    13531  return VK_SUCCESS;
    13532  }
    13533  minCost = currCost;
    13534  }
    13535  }
    13536  }
    13537  }
    13538  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13539 }
    13540 
    13542  VmaAllocator allocator,
    13543  const VkBufferCreateInfo* pBufferCreateInfo,
    13544  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13545  uint32_t* pMemoryTypeIndex)
    13546 {
    13547  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13548  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13549  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13550  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13551 
    13552  const VkDevice hDev = allocator->m_hDevice;
    13553  VkBuffer hBuffer = VK_NULL_HANDLE;
    13554  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13555  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13556  if(res == VK_SUCCESS)
    13557  {
    13558  VkMemoryRequirements memReq = {};
    13559  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13560  hDev, hBuffer, &memReq);
    13561 
    13562  res = vmaFindMemoryTypeIndex(
    13563  allocator,
    13564  memReq.memoryTypeBits,
    13565  pAllocationCreateInfo,
    13566  pMemoryTypeIndex);
    13567 
    13568  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13569  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13570  }
    13571  return res;
    13572 }
    13573 
    13575  VmaAllocator allocator,
    13576  const VkImageCreateInfo* pImageCreateInfo,
    13577  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13578  uint32_t* pMemoryTypeIndex)
    13579 {
    13580  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13581  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13582  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13583  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13584 
    13585  const VkDevice hDev = allocator->m_hDevice;
    13586  VkImage hImage = VK_NULL_HANDLE;
    13587  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13588  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13589  if(res == VK_SUCCESS)
    13590  {
    13591  VkMemoryRequirements memReq = {};
    13592  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13593  hDev, hImage, &memReq);
    13594 
    13595  res = vmaFindMemoryTypeIndex(
    13596  allocator,
    13597  memReq.memoryTypeBits,
    13598  pAllocationCreateInfo,
    13599  pMemoryTypeIndex);
    13600 
    13601  allocator->GetVulkanFunctions().vkDestroyImage(
    13602  hDev, hImage, allocator->GetAllocationCallbacks());
    13603  }
    13604  return res;
    13605 }
    13606 
    13607 VkResult vmaCreatePool(
    13608  VmaAllocator allocator,
    13609  const VmaPoolCreateInfo* pCreateInfo,
    13610  VmaPool* pPool)
    13611 {
    13612  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13613 
    13614  VMA_DEBUG_LOG("vmaCreatePool");
    13615 
    13616  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13617 
    13618  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13619 
    13620 #if VMA_RECORDING_ENABLED
    13621  if(allocator->GetRecorder() != VMA_NULL)
    13622  {
    13623  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13624  }
    13625 #endif
    13626 
    13627  return res;
    13628 }
    13629 
    13630 void vmaDestroyPool(
    13631  VmaAllocator allocator,
    13632  VmaPool pool)
    13633 {
    13634  VMA_ASSERT(allocator);
    13635 
    13636  if(pool == VK_NULL_HANDLE)
    13637  {
    13638  return;
    13639  }
    13640 
    13641  VMA_DEBUG_LOG("vmaDestroyPool");
    13642 
    13643  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13644 
    13645 #if VMA_RECORDING_ENABLED
    13646  if(allocator->GetRecorder() != VMA_NULL)
    13647  {
    13648  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13649  }
    13650 #endif
    13651 
    13652  allocator->DestroyPool(pool);
    13653 }
    13654 
    13655 void vmaGetPoolStats(
    13656  VmaAllocator allocator,
    13657  VmaPool pool,
    13658  VmaPoolStats* pPoolStats)
    13659 {
    13660  VMA_ASSERT(allocator && pool && pPoolStats);
    13661 
    13662  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13663 
    13664  allocator->GetPoolStats(pool, pPoolStats);
    13665 }
    13666 
    13668  VmaAllocator allocator,
    13669  VmaPool pool,
    13670  size_t* pLostAllocationCount)
    13671 {
    13672  VMA_ASSERT(allocator && pool);
    13673 
    13674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13675 
    13676 #if VMA_RECORDING_ENABLED
    13677  if(allocator->GetRecorder() != VMA_NULL)
    13678  {
    13679  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13680  }
    13681 #endif
    13682 
    13683  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13684 }
    13685 
    13686 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13687 {
    13688  VMA_ASSERT(allocator && pool);
    13689 
    13690  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13691 
    13692  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13693 
    13694  return allocator->CheckPoolCorruption(pool);
    13695 }
    13696 
    13697 VkResult vmaAllocateMemory(
    13698  VmaAllocator allocator,
    13699  const VkMemoryRequirements* pVkMemoryRequirements,
    13700  const VmaAllocationCreateInfo* pCreateInfo,
    13701  VmaAllocation* pAllocation,
    13702  VmaAllocationInfo* pAllocationInfo)
    13703 {
    13704  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13705 
    13706  VMA_DEBUG_LOG("vmaAllocateMemory");
    13707 
    13708  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13709 
    13710  VkResult result = allocator->AllocateMemory(
    13711  *pVkMemoryRequirements,
    13712  false, // requiresDedicatedAllocation
    13713  false, // prefersDedicatedAllocation
    13714  VK_NULL_HANDLE, // dedicatedBuffer
    13715  VK_NULL_HANDLE, // dedicatedImage
    13716  *pCreateInfo,
    13717  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13718  pAllocation);
    13719 
    13720 #if VMA_RECORDING_ENABLED
    13721  if(allocator->GetRecorder() != VMA_NULL)
    13722  {
    13723  allocator->GetRecorder()->RecordAllocateMemory(
    13724  allocator->GetCurrentFrameIndex(),
    13725  *pVkMemoryRequirements,
    13726  *pCreateInfo,
    13727  *pAllocation);
    13728  }
    13729 #endif
    13730 
    13731  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13732  {
    13733  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13734  }
    13735 
    13736  return result;
    13737 }
    13738 
    13740  VmaAllocator allocator,
    13741  VkBuffer buffer,
    13742  const VmaAllocationCreateInfo* pCreateInfo,
    13743  VmaAllocation* pAllocation,
    13744  VmaAllocationInfo* pAllocationInfo)
    13745 {
    13746  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13747 
    13748  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13749 
    13750  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13751 
    13752  VkMemoryRequirements vkMemReq = {};
    13753  bool requiresDedicatedAllocation = false;
    13754  bool prefersDedicatedAllocation = false;
    13755  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    13756  requiresDedicatedAllocation,
    13757  prefersDedicatedAllocation);
    13758 
    13759  VkResult result = allocator->AllocateMemory(
    13760  vkMemReq,
    13761  requiresDedicatedAllocation,
    13762  prefersDedicatedAllocation,
    13763  buffer, // dedicatedBuffer
    13764  VK_NULL_HANDLE, // dedicatedImage
    13765  *pCreateInfo,
    13766  VMA_SUBALLOCATION_TYPE_BUFFER,
    13767  pAllocation);
    13768 
    13769 #if VMA_RECORDING_ENABLED
    13770  if(allocator->GetRecorder() != VMA_NULL)
    13771  {
    13772  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    13773  allocator->GetCurrentFrameIndex(),
    13774  vkMemReq,
    13775  requiresDedicatedAllocation,
    13776  prefersDedicatedAllocation,
    13777  *pCreateInfo,
    13778  *pAllocation);
    13779  }
    13780 #endif
    13781 
    13782  if(pAllocationInfo && result == VK_SUCCESS)
    13783  {
    13784  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13785  }
    13786 
    13787  return result;
    13788 }
    13789 
    13790 VkResult vmaAllocateMemoryForImage(
    13791  VmaAllocator allocator,
    13792  VkImage image,
    13793  const VmaAllocationCreateInfo* pCreateInfo,
    13794  VmaAllocation* pAllocation,
    13795  VmaAllocationInfo* pAllocationInfo)
    13796 {
    13797  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13798 
    13799  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    13800 
    13801  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13802 
    13803  VkMemoryRequirements vkMemReq = {};
    13804  bool requiresDedicatedAllocation = false;
    13805  bool prefersDedicatedAllocation = false;
    13806  allocator->GetImageMemoryRequirements(image, vkMemReq,
    13807  requiresDedicatedAllocation, prefersDedicatedAllocation);
    13808 
    13809  VkResult result = allocator->AllocateMemory(
    13810  vkMemReq,
    13811  requiresDedicatedAllocation,
    13812  prefersDedicatedAllocation,
    13813  VK_NULL_HANDLE, // dedicatedBuffer
    13814  image, // dedicatedImage
    13815  *pCreateInfo,
    13816  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    13817  pAllocation);
    13818 
    13819 #if VMA_RECORDING_ENABLED
    13820  if(allocator->GetRecorder() != VMA_NULL)
    13821  {
    13822  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    13823  allocator->GetCurrentFrameIndex(),
    13824  vkMemReq,
    13825  requiresDedicatedAllocation,
    13826  prefersDedicatedAllocation,
    13827  *pCreateInfo,
    13828  *pAllocation);
    13829  }
    13830 #endif
    13831 
    13832  if(pAllocationInfo && result == VK_SUCCESS)
    13833  {
    13834  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13835  }
    13836 
    13837  return result;
    13838 }
    13839 
    13840 void vmaFreeMemory(
    13841  VmaAllocator allocator,
    13842  VmaAllocation allocation)
    13843 {
    13844  VMA_ASSERT(allocator);
    13845 
    13846  if(allocation == VK_NULL_HANDLE)
    13847  {
    13848  return;
    13849  }
    13850 
    13851  VMA_DEBUG_LOG("vmaFreeMemory");
    13852 
    13853  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13854 
    13855 #if VMA_RECORDING_ENABLED
    13856  if(allocator->GetRecorder() != VMA_NULL)
    13857  {
    13858  allocator->GetRecorder()->RecordFreeMemory(
    13859  allocator->GetCurrentFrameIndex(),
    13860  allocation);
    13861  }
    13862 #endif
    13863 
    13864  allocator->FreeMemory(allocation);
    13865 }
    13866 
    13868  VmaAllocator allocator,
    13869  VmaAllocation allocation,
    13870  VmaAllocationInfo* pAllocationInfo)
    13871 {
    13872  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    13873 
    13874  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13875 
    13876 #if VMA_RECORDING_ENABLED
    13877  if(allocator->GetRecorder() != VMA_NULL)
    13878  {
    13879  allocator->GetRecorder()->RecordGetAllocationInfo(
    13880  allocator->GetCurrentFrameIndex(),
    13881  allocation);
    13882  }
    13883 #endif
    13884 
    13885  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    13886 }
    13887 
    13888 VkBool32 vmaTouchAllocation(
    13889  VmaAllocator allocator,
    13890  VmaAllocation allocation)
    13891 {
    13892  VMA_ASSERT(allocator && allocation);
    13893 
    13894  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13895 
    13896 #if VMA_RECORDING_ENABLED
    13897  if(allocator->GetRecorder() != VMA_NULL)
    13898  {
    13899  allocator->GetRecorder()->RecordTouchAllocation(
    13900  allocator->GetCurrentFrameIndex(),
    13901  allocation);
    13902  }
    13903 #endif
    13904 
    13905  return allocator->TouchAllocation(allocation);
    13906 }
    13907 
    13909  VmaAllocator allocator,
    13910  VmaAllocation allocation,
    13911  void* pUserData)
    13912 {
    13913  VMA_ASSERT(allocator && allocation);
    13914 
    13915  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13916 
    13917  allocation->SetUserData(allocator, pUserData);
    13918 
    13919 #if VMA_RECORDING_ENABLED
    13920  if(allocator->GetRecorder() != VMA_NULL)
    13921  {
    13922  allocator->GetRecorder()->RecordSetAllocationUserData(
    13923  allocator->GetCurrentFrameIndex(),
    13924  allocation,
    13925  pUserData);
    13926  }
    13927 #endif
    13928 }
    13929 
    13931  VmaAllocator allocator,
    13932  VmaAllocation* pAllocation)
    13933 {
    13934  VMA_ASSERT(allocator && pAllocation);
    13935 
    13936  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    13937 
    13938  allocator->CreateLostAllocation(pAllocation);
    13939 
    13940 #if VMA_RECORDING_ENABLED
    13941  if(allocator->GetRecorder() != VMA_NULL)
    13942  {
    13943  allocator->GetRecorder()->RecordCreateLostAllocation(
    13944  allocator->GetCurrentFrameIndex(),
    13945  *pAllocation);
    13946  }
    13947 #endif
    13948 }
    13949 
    13950 VkResult vmaMapMemory(
    13951  VmaAllocator allocator,
    13952  VmaAllocation allocation,
    13953  void** ppData)
    13954 {
    13955  VMA_ASSERT(allocator && allocation && ppData);
    13956 
    13957  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13958 
    13959  VkResult res = allocator->Map(allocation, ppData);
    13960 
    13961 #if VMA_RECORDING_ENABLED
    13962  if(allocator->GetRecorder() != VMA_NULL)
    13963  {
    13964  allocator->GetRecorder()->RecordMapMemory(
    13965  allocator->GetCurrentFrameIndex(),
    13966  allocation);
    13967  }
    13968 #endif
    13969 
    13970  return res;
    13971 }
    13972 
    13973 void vmaUnmapMemory(
    13974  VmaAllocator allocator,
    13975  VmaAllocation allocation)
    13976 {
    13977  VMA_ASSERT(allocator && allocation);
    13978 
    13979  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13980 
    13981 #if VMA_RECORDING_ENABLED
    13982  if(allocator->GetRecorder() != VMA_NULL)
    13983  {
    13984  allocator->GetRecorder()->RecordUnmapMemory(
    13985  allocator->GetCurrentFrameIndex(),
    13986  allocation);
    13987  }
    13988 #endif
    13989 
    13990  allocator->Unmap(allocation);
    13991 }
    13992 
    13993 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13994 {
    13995  VMA_ASSERT(allocator && allocation);
    13996 
    13997  VMA_DEBUG_LOG("vmaFlushAllocation");
    13998 
    13999  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14000 
    14001  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14002 
    14003 #if VMA_RECORDING_ENABLED
    14004  if(allocator->GetRecorder() != VMA_NULL)
    14005  {
    14006  allocator->GetRecorder()->RecordFlushAllocation(
    14007  allocator->GetCurrentFrameIndex(),
    14008  allocation, offset, size);
    14009  }
    14010 #endif
    14011 }
    14012 
    14013 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14014 {
    14015  VMA_ASSERT(allocator && allocation);
    14016 
    14017  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14018 
    14019  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14020 
    14021  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14022 
    14023 #if VMA_RECORDING_ENABLED
    14024  if(allocator->GetRecorder() != VMA_NULL)
    14025  {
    14026  allocator->GetRecorder()->RecordInvalidateAllocation(
    14027  allocator->GetCurrentFrameIndex(),
    14028  allocation, offset, size);
    14029  }
    14030 #endif
    14031 }
    14032 
    14033 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14034 {
    14035  VMA_ASSERT(allocator);
    14036 
    14037  VMA_DEBUG_LOG("vmaCheckCorruption");
    14038 
    14039  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14040 
    14041  return allocator->CheckCorruption(memoryTypeBits);
    14042 }
    14043 
    14044 VkResult vmaDefragment(
    14045  VmaAllocator allocator,
    14046  VmaAllocation* pAllocations,
    14047  size_t allocationCount,
    14048  VkBool32* pAllocationsChanged,
    14049  const VmaDefragmentationInfo *pDefragmentationInfo,
    14050  VmaDefragmentationStats* pDefragmentationStats)
    14051 {
    14052  VMA_ASSERT(allocator && pAllocations);
    14053 
    14054  VMA_DEBUG_LOG("vmaDefragment");
    14055 
    14056  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14057 
    14058  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14059 }
    14060 
    14061 VkResult vmaBindBufferMemory(
    14062  VmaAllocator allocator,
    14063  VmaAllocation allocation,
    14064  VkBuffer buffer)
    14065 {
    14066  VMA_ASSERT(allocator && allocation && buffer);
    14067 
    14068  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14069 
    14070  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14071 
    14072  return allocator->BindBufferMemory(allocation, buffer);
    14073 }
    14074 
    14075 VkResult vmaBindImageMemory(
    14076  VmaAllocator allocator,
    14077  VmaAllocation allocation,
    14078  VkImage image)
    14079 {
    14080  VMA_ASSERT(allocator && allocation && image);
    14081 
    14082  VMA_DEBUG_LOG("vmaBindImageMemory");
    14083 
    14084  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14085 
    14086  return allocator->BindImageMemory(allocation, image);
    14087 }
    14088 
    14089 VkResult vmaCreateBuffer(
    14090  VmaAllocator allocator,
    14091  const VkBufferCreateInfo* pBufferCreateInfo,
    14092  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14093  VkBuffer* pBuffer,
    14094  VmaAllocation* pAllocation,
    14095  VmaAllocationInfo* pAllocationInfo)
    14096 {
    14097  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14098 
    14099  VMA_DEBUG_LOG("vmaCreateBuffer");
    14100 
    14101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14102 
    14103  *pBuffer = VK_NULL_HANDLE;
    14104  *pAllocation = VK_NULL_HANDLE;
    14105 
    14106  // 1. Create VkBuffer.
    14107  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14108  allocator->m_hDevice,
    14109  pBufferCreateInfo,
    14110  allocator->GetAllocationCallbacks(),
    14111  pBuffer);
    14112  if(res >= 0)
    14113  {
    14114  // 2. vkGetBufferMemoryRequirements.
    14115  VkMemoryRequirements vkMemReq = {};
    14116  bool requiresDedicatedAllocation = false;
    14117  bool prefersDedicatedAllocation = false;
    14118  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14119  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14120 
    14121  // Make sure alignment requirements for specific buffer usages reported
    14122  // in Physical Device Properties are included in alignment reported by memory requirements.
    14123  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14124  {
    14125  VMA_ASSERT(vkMemReq.alignment %
    14126  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14127  }
    14128  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14129  {
    14130  VMA_ASSERT(vkMemReq.alignment %
    14131  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14132  }
    14133  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14134  {
    14135  VMA_ASSERT(vkMemReq.alignment %
    14136  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14137  }
    14138 
    14139  // 3. Allocate memory using allocator.
    14140  res = allocator->AllocateMemory(
    14141  vkMemReq,
    14142  requiresDedicatedAllocation,
    14143  prefersDedicatedAllocation,
    14144  *pBuffer, // dedicatedBuffer
    14145  VK_NULL_HANDLE, // dedicatedImage
    14146  *pAllocationCreateInfo,
    14147  VMA_SUBALLOCATION_TYPE_BUFFER,
    14148  pAllocation);
    14149 
    14150 #if VMA_RECORDING_ENABLED
    14151  if(allocator->GetRecorder() != VMA_NULL)
    14152  {
    14153  allocator->GetRecorder()->RecordCreateBuffer(
    14154  allocator->GetCurrentFrameIndex(),
    14155  *pBufferCreateInfo,
    14156  *pAllocationCreateInfo,
    14157  *pAllocation);
    14158  }
    14159 #endif
    14160 
    14161  if(res >= 0)
    14162  {
    14163  // 3. Bind buffer with memory.
    14164  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14165  if(res >= 0)
    14166  {
    14167  // All steps succeeded.
    14168  #if VMA_STATS_STRING_ENABLED
    14169  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14170  #endif
    14171  if(pAllocationInfo != VMA_NULL)
    14172  {
    14173  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14174  }
    14175 
    14176  return VK_SUCCESS;
    14177  }
    14178  allocator->FreeMemory(*pAllocation);
    14179  *pAllocation = VK_NULL_HANDLE;
    14180  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14181  *pBuffer = VK_NULL_HANDLE;
    14182  return res;
    14183  }
    14184  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14185  *pBuffer = VK_NULL_HANDLE;
    14186  return res;
    14187  }
    14188  return res;
    14189 }
    14190 
    14191 void vmaDestroyBuffer(
    14192  VmaAllocator allocator,
    14193  VkBuffer buffer,
    14194  VmaAllocation allocation)
    14195 {
    14196  VMA_ASSERT(allocator);
    14197 
    14198  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14199  {
    14200  return;
    14201  }
    14202 
    14203  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14204 
    14205  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14206 
    14207 #if VMA_RECORDING_ENABLED
    14208  if(allocator->GetRecorder() != VMA_NULL)
    14209  {
    14210  allocator->GetRecorder()->RecordDestroyBuffer(
    14211  allocator->GetCurrentFrameIndex(),
    14212  allocation);
    14213  }
    14214 #endif
    14215 
    14216  if(buffer != VK_NULL_HANDLE)
    14217  {
    14218  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14219  }
    14220 
    14221  if(allocation != VK_NULL_HANDLE)
    14222  {
    14223  allocator->FreeMemory(allocation);
    14224  }
    14225 }
    14226 
    14227 VkResult vmaCreateImage(
    14228  VmaAllocator allocator,
    14229  const VkImageCreateInfo* pImageCreateInfo,
    14230  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14231  VkImage* pImage,
    14232  VmaAllocation* pAllocation,
    14233  VmaAllocationInfo* pAllocationInfo)
    14234 {
    14235  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14236 
    14237  VMA_DEBUG_LOG("vmaCreateImage");
    14238 
    14239  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14240 
    14241  *pImage = VK_NULL_HANDLE;
    14242  *pAllocation = VK_NULL_HANDLE;
    14243 
    14244  // 1. Create VkImage.
    14245  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14246  allocator->m_hDevice,
    14247  pImageCreateInfo,
    14248  allocator->GetAllocationCallbacks(),
    14249  pImage);
    14250  if(res >= 0)
    14251  {
    14252  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14253  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14254  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14255 
    14256  // 2. Allocate memory using allocator.
    14257  VkMemoryRequirements vkMemReq = {};
    14258  bool requiresDedicatedAllocation = false;
    14259  bool prefersDedicatedAllocation = false;
    14260  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14261  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14262 
    14263  res = allocator->AllocateMemory(
    14264  vkMemReq,
    14265  requiresDedicatedAllocation,
    14266  prefersDedicatedAllocation,
    14267  VK_NULL_HANDLE, // dedicatedBuffer
    14268  *pImage, // dedicatedImage
    14269  *pAllocationCreateInfo,
    14270  suballocType,
    14271  pAllocation);
    14272 
    14273 #if VMA_RECORDING_ENABLED
    14274  if(allocator->GetRecorder() != VMA_NULL)
    14275  {
    14276  allocator->GetRecorder()->RecordCreateImage(
    14277  allocator->GetCurrentFrameIndex(),
    14278  *pImageCreateInfo,
    14279  *pAllocationCreateInfo,
    14280  *pAllocation);
    14281  }
    14282 #endif
    14283 
    14284  if(res >= 0)
    14285  {
    14286  // 3. Bind image with memory.
    14287  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14288  if(res >= 0)
    14289  {
    14290  // All steps succeeded.
    14291  #if VMA_STATS_STRING_ENABLED
    14292  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14293  #endif
    14294  if(pAllocationInfo != VMA_NULL)
    14295  {
    14296  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14297  }
    14298 
    14299  return VK_SUCCESS;
    14300  }
    14301  allocator->FreeMemory(*pAllocation);
    14302  *pAllocation = VK_NULL_HANDLE;
    14303  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14304  *pImage = VK_NULL_HANDLE;
    14305  return res;
    14306  }
    14307  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14308  *pImage = VK_NULL_HANDLE;
    14309  return res;
    14310  }
    14311  return res;
    14312 }
    14313 
    14314 void vmaDestroyImage(
    14315  VmaAllocator allocator,
    14316  VkImage image,
    14317  VmaAllocation allocation)
    14318 {
    14319  VMA_ASSERT(allocator);
    14320 
    14321  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14322  {
    14323  return;
    14324  }
    14325 
    14326  VMA_DEBUG_LOG("vmaDestroyImage");
    14327 
    14328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14329 
    14330 #if VMA_RECORDING_ENABLED
    14331  if(allocator->GetRecorder() != VMA_NULL)
    14332  {
    14333  allocator->GetRecorder()->RecordDestroyImage(
    14334  allocator->GetCurrentFrameIndex(),
    14335  allocation);
    14336  }
    14337 #endif
    14338 
    14339  if(image != VK_NULL_HANDLE)
    14340  {
    14341  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14342  }
    14343  if(allocation != VK_NULL_HANDLE)
    14344  {
    14345  allocator->FreeMemory(allocation);
    14346  }
    14347 }
    14348 
    14349 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1575
    -
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1876
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1477 /*
    1478 Define this macro to 0/1 to disable/enable support for recording functionality,
    1479 available through VmaAllocatorCreateInfo::pRecordSettings.
    1480 */
    1481 #ifndef VMA_RECORDING_ENABLED
    1482  #ifdef _WIN32
    1483  #define VMA_RECORDING_ENABLED 1
    1484  #else
    1485  #define VMA_RECORDING_ENABLED 0
    1486  #endif
    1487 #endif
    1488 
    1489 #ifndef NOMINMAX
    1490  #define NOMINMAX // For windows.h
    1491 #endif
    1492 
    1493 #include <vulkan/vulkan.h>
    1494 
    1495 #if VMA_RECORDING_ENABLED
    1496  #include <windows.h>
    1497 #endif
    1498 
    1499 #if !defined(VMA_DEDICATED_ALLOCATION)
    1500  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1501  #define VMA_DEDICATED_ALLOCATION 1
    1502  #else
    1503  #define VMA_DEDICATED_ALLOCATION 0
    1504  #endif
    1505 #endif
    1506 
    1516 VK_DEFINE_HANDLE(VmaAllocator)
    1517 
    1518 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1520  VmaAllocator allocator,
    1521  uint32_t memoryType,
    1522  VkDeviceMemory memory,
    1523  VkDeviceSize size);
    1525 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1526  VmaAllocator allocator,
    1527  uint32_t memoryType,
    1528  VkDeviceMemory memory,
    1529  VkDeviceSize size);
    1530 
    1544 
    1574 
    1577 typedef VkFlags VmaAllocatorCreateFlags;
    1578 
    1583 typedef struct VmaVulkanFunctions {
    1584  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1585  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1586  PFN_vkAllocateMemory vkAllocateMemory;
    1587  PFN_vkFreeMemory vkFreeMemory;
    1588  PFN_vkMapMemory vkMapMemory;
    1589  PFN_vkUnmapMemory vkUnmapMemory;
    1590  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1591  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1592  PFN_vkBindBufferMemory vkBindBufferMemory;
    1593  PFN_vkBindImageMemory vkBindImageMemory;
    1594  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1595  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1596  PFN_vkCreateBuffer vkCreateBuffer;
    1597  PFN_vkDestroyBuffer vkDestroyBuffer;
    1598  PFN_vkCreateImage vkCreateImage;
    1599  PFN_vkDestroyImage vkDestroyImage;
    1600 #if VMA_DEDICATED_ALLOCATION
    1601  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1602  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1603 #endif
    1605 
    1607 typedef enum VmaRecordFlagBits {
    1614 
    1617 typedef VkFlags VmaRecordFlags;
    1618 
    1620 typedef struct VmaRecordSettings
    1621 {
    1631  const char* pFilePath;
    1633 
    1636 {
    1640 
    1641  VkPhysicalDevice physicalDevice;
    1643 
    1644  VkDevice device;
    1646 
    1649 
    1650  const VkAllocationCallbacks* pAllocationCallbacks;
    1652 
    1691  const VkDeviceSize* pHeapSizeLimit;
    1712 
    1714 VkResult vmaCreateAllocator(
    1715  const VmaAllocatorCreateInfo* pCreateInfo,
    1716  VmaAllocator* pAllocator);
    1717 
    1719 void vmaDestroyAllocator(
    1720  VmaAllocator allocator);
    1721 
    1727  VmaAllocator allocator,
    1728  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1729 
    1735  VmaAllocator allocator,
    1736  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1737 
    1745  VmaAllocator allocator,
    1746  uint32_t memoryTypeIndex,
    1747  VkMemoryPropertyFlags* pFlags);
    1748 
    1758  VmaAllocator allocator,
    1759  uint32_t frameIndex);
    1760 
    1763 typedef struct VmaStatInfo
    1764 {
    1766  uint32_t blockCount;
    1772  VkDeviceSize usedBytes;
    1774  VkDeviceSize unusedBytes;
    1777 } VmaStatInfo;
    1778 
    1780 typedef struct VmaStats
    1781 {
    1782  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1783  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1785 } VmaStats;
    1786 
    1788 void vmaCalculateStats(
    1789  VmaAllocator allocator,
    1790  VmaStats* pStats);
    1791 
    1792 #define VMA_STATS_STRING_ENABLED 1
    1793 
    1794 #if VMA_STATS_STRING_ENABLED
    1795 
    1797 
    1799 void vmaBuildStatsString(
    1800  VmaAllocator allocator,
    1801  char** ppStatsString,
    1802  VkBool32 detailedMap);
    1803 
    1804 void vmaFreeStatsString(
    1805  VmaAllocator allocator,
    1806  char* pStatsString);
    1807 
    1808 #endif // #if VMA_STATS_STRING_ENABLED
    1809 
    1818 VK_DEFINE_HANDLE(VmaPool)
    1819 
    1820 typedef enum VmaMemoryUsage
    1821 {
    1870 } VmaMemoryUsage;
    1871 
    1886 
    1941 
    1954 
    1964 
    1971 
    1975 
    1977 {
    1990  VkMemoryPropertyFlags requiredFlags;
    1995  VkMemoryPropertyFlags preferredFlags;
    2003  uint32_t memoryTypeBits;
    2016  void* pUserData;
    2018 
    2035 VkResult vmaFindMemoryTypeIndex(
    2036  VmaAllocator allocator,
    2037  uint32_t memoryTypeBits,
    2038  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2039  uint32_t* pMemoryTypeIndex);
    2040 
    2054  VmaAllocator allocator,
    2055  const VkBufferCreateInfo* pBufferCreateInfo,
    2056  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2057  uint32_t* pMemoryTypeIndex);
    2058 
    2072  VmaAllocator allocator,
    2073  const VkImageCreateInfo* pImageCreateInfo,
    2074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2075  uint32_t* pMemoryTypeIndex);
    2076 
    2097 
    2114 
    2125 
    2131 
    2134 typedef VkFlags VmaPoolCreateFlags;
    2135 
    2138 typedef struct VmaPoolCreateInfo {
    2153  VkDeviceSize blockSize;
    2182 
    2185 typedef struct VmaPoolStats {
    2188  VkDeviceSize size;
    2191  VkDeviceSize unusedSize;
    2204  VkDeviceSize unusedRangeSizeMax;
    2207  size_t blockCount;
    2208 } VmaPoolStats;
    2209 
    2216 VkResult vmaCreatePool(
    2217  VmaAllocator allocator,
    2218  const VmaPoolCreateInfo* pCreateInfo,
    2219  VmaPool* pPool);
    2220 
    2223 void vmaDestroyPool(
    2224  VmaAllocator allocator,
    2225  VmaPool pool);
    2226 
    2233 void vmaGetPoolStats(
    2234  VmaAllocator allocator,
    2235  VmaPool pool,
    2236  VmaPoolStats* pPoolStats);
    2237 
    2245  VmaAllocator allocator,
    2246  VmaPool pool,
    2247  size_t* pLostAllocationCount);
    2248 
    2263 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2264 
    2289 VK_DEFINE_HANDLE(VmaAllocation)
    2290 
    2291 
    2293 typedef struct VmaAllocationInfo {
    2298  uint32_t memoryType;
    2307  VkDeviceMemory deviceMemory;
    2312  VkDeviceSize offset;
    2317  VkDeviceSize size;
    2331  void* pUserData;
    2333 
    2344 VkResult vmaAllocateMemory(
    2345  VmaAllocator allocator,
    2346  const VkMemoryRequirements* pVkMemoryRequirements,
    2347  const VmaAllocationCreateInfo* pCreateInfo,
    2348  VmaAllocation* pAllocation,
    2349  VmaAllocationInfo* pAllocationInfo);
    2350 
    2358  VmaAllocator allocator,
    2359  VkBuffer buffer,
    2360  const VmaAllocationCreateInfo* pCreateInfo,
    2361  VmaAllocation* pAllocation,
    2362  VmaAllocationInfo* pAllocationInfo);
    2363 
    2365 VkResult vmaAllocateMemoryForImage(
    2366  VmaAllocator allocator,
    2367  VkImage image,
    2368  const VmaAllocationCreateInfo* pCreateInfo,
    2369  VmaAllocation* pAllocation,
    2370  VmaAllocationInfo* pAllocationInfo);
    2371 
    2373 void vmaFreeMemory(
    2374  VmaAllocator allocator,
    2375  VmaAllocation allocation);
    2376 
    2394  VmaAllocator allocator,
    2395  VmaAllocation allocation,
    2396  VmaAllocationInfo* pAllocationInfo);
    2397 
    2412 VkBool32 vmaTouchAllocation(
    2413  VmaAllocator allocator,
    2414  VmaAllocation allocation);
    2415 
    2430  VmaAllocator allocator,
    2431  VmaAllocation allocation,
    2432  void* pUserData);
    2433 
    2445  VmaAllocator allocator,
    2446  VmaAllocation* pAllocation);
    2447 
    2482 VkResult vmaMapMemory(
    2483  VmaAllocator allocator,
    2484  VmaAllocation allocation,
    2485  void** ppData);
    2486 
    2491 void vmaUnmapMemory(
    2492  VmaAllocator allocator,
    2493  VmaAllocation allocation);
    2494 
    2507 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2508 
    2521 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2522 
    2539 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2540 
    2542 typedef struct VmaDefragmentationInfo {
    2547  VkDeviceSize maxBytesToMove;
    2554 
    2556 typedef struct VmaDefragmentationStats {
    2558  VkDeviceSize bytesMoved;
    2560  VkDeviceSize bytesFreed;
    2566 
    2605 VkResult vmaDefragment(
    2606  VmaAllocator allocator,
    2607  VmaAllocation* pAllocations,
    2608  size_t allocationCount,
    2609  VkBool32* pAllocationsChanged,
    2610  const VmaDefragmentationInfo *pDefragmentationInfo,
    2611  VmaDefragmentationStats* pDefragmentationStats);
    2612 
    2625 VkResult vmaBindBufferMemory(
    2626  VmaAllocator allocator,
    2627  VmaAllocation allocation,
    2628  VkBuffer buffer);
    2629 
    2642 VkResult vmaBindImageMemory(
    2643  VmaAllocator allocator,
    2644  VmaAllocation allocation,
    2645  VkImage image);
    2646 
    2673 VkResult vmaCreateBuffer(
    2674  VmaAllocator allocator,
    2675  const VkBufferCreateInfo* pBufferCreateInfo,
    2676  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2677  VkBuffer* pBuffer,
    2678  VmaAllocation* pAllocation,
    2679  VmaAllocationInfo* pAllocationInfo);
    2680 
    2692 void vmaDestroyBuffer(
    2693  VmaAllocator allocator,
    2694  VkBuffer buffer,
    2695  VmaAllocation allocation);
    2696 
    2698 VkResult vmaCreateImage(
    2699  VmaAllocator allocator,
    2700  const VkImageCreateInfo* pImageCreateInfo,
    2701  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2702  VkImage* pImage,
    2703  VmaAllocation* pAllocation,
    2704  VmaAllocationInfo* pAllocationInfo);
    2705 
    2717 void vmaDestroyImage(
    2718  VmaAllocator allocator,
    2719  VkImage image,
    2720  VmaAllocation allocation);
    2721 
    2722 #ifdef __cplusplus
    2723 }
    2724 #endif
    2725 
    2726 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2727 
    2728 // For Visual Studio IntelliSense.
    2729 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2730 #define VMA_IMPLEMENTATION
    2731 #endif
    2732 
    2733 #ifdef VMA_IMPLEMENTATION
    2734 #undef VMA_IMPLEMENTATION
    2735 
    2736 #include <cstdint>
    2737 #include <cstdlib>
    2738 #include <cstring>
    2739 
    2740 /*******************************************************************************
    2741 CONFIGURATION SECTION
    2742 
    2743 Define some of these macros before each #include of this header or change them
    2744 here if you need other then default behavior depending on your environment.
    2745 */
    2746 
    2747 /*
    2748 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2749 internally, like:
    2750 
    2751  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2752 
    2753 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2754 VmaAllocatorCreateInfo::pVulkanFunctions.
    2755 */
    2756 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2757 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2758 #endif
    2759 
    2760 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2761 //#define VMA_USE_STL_CONTAINERS 1
    2762 
    2763 /* Set this macro to 1 to make the library including and using STL containers:
    2764 std::pair, std::vector, std::list, std::unordered_map.
    2765 
    2766 Set it to 0 or undefined to make the library using its own implementation of
    2767 the containers.
    2768 */
    2769 #if VMA_USE_STL_CONTAINERS
    2770  #define VMA_USE_STL_VECTOR 1
    2771  #define VMA_USE_STL_UNORDERED_MAP 1
    2772  #define VMA_USE_STL_LIST 1
    2773 #endif
    2774 
    2775 #if VMA_USE_STL_VECTOR
    2776  #include <vector>
    2777 #endif
    2778 
    2779 #if VMA_USE_STL_UNORDERED_MAP
    2780  #include <unordered_map>
    2781 #endif
    2782 
    2783 #if VMA_USE_STL_LIST
    2784  #include <list>
    2785 #endif
    2786 
    2787 /*
    2788 Following headers are used in this CONFIGURATION section only, so feel free to
    2789 remove them if not needed.
    2790 */
    2791 #include <cassert> // for assert
    2792 #include <algorithm> // for min, max
    2793 #include <mutex> // for std::mutex
    2794 #include <atomic> // for std::atomic
    2795 
    2796 #ifndef VMA_NULL
    2797  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2798  #define VMA_NULL nullptr
    2799 #endif
    2800 
    2801 #if defined(__APPLE__) || defined(__ANDROID__)
    2802 #include <cstdlib>
    2803 void *aligned_alloc(size_t alignment, size_t size)
    2804 {
    2805  // alignment must be >= sizeof(void*)
    2806  if(alignment < sizeof(void*))
    2807  {
    2808  alignment = sizeof(void*);
    2809  }
    2810 
    2811  void *pointer;
    2812  if(posix_memalign(&pointer, alignment, size) == 0)
    2813  return pointer;
    2814  return VMA_NULL;
    2815 }
    2816 #endif
    2817 
    2818 // If your compiler is not compatible with C++11 and definition of
    2819 // aligned_alloc() function is missing, uncommeting following line may help:
    2820 
    2821 //#include <malloc.h>
    2822 
    2823 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2824 #ifndef VMA_ASSERT
    2825  #ifdef _DEBUG
    2826  #define VMA_ASSERT(expr) assert(expr)
    2827  #else
    2828  #define VMA_ASSERT(expr)
    2829  #endif
    2830 #endif
    2831 
    2832 // Assert that will be called very often, like inside data structures e.g. operator[].
    2833 // Making it non-empty can make program slow.
    2834 #ifndef VMA_HEAVY_ASSERT
    2835  #ifdef _DEBUG
    2836  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2837  #else
    2838  #define VMA_HEAVY_ASSERT(expr)
    2839  #endif
    2840 #endif
    2841 
    2842 #ifndef VMA_ALIGN_OF
    2843  #define VMA_ALIGN_OF(type) (__alignof(type))
    2844 #endif
    2845 
    2846 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2847  #if defined(_WIN32)
    2848  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2849  #else
    2850  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2851  #endif
    2852 #endif
    2853 
    2854 #ifndef VMA_SYSTEM_FREE
    2855  #if defined(_WIN32)
    2856  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2857  #else
    2858  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2859  #endif
    2860 #endif
    2861 
    2862 #ifndef VMA_MIN
    2863  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2864 #endif
    2865 
    2866 #ifndef VMA_MAX
    2867  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2868 #endif
    2869 
    2870 #ifndef VMA_SWAP
    2871  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2872 #endif
    2873 
    2874 #ifndef VMA_SORT
    2875  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2876 #endif
    2877 
    2878 #ifndef VMA_DEBUG_LOG
    2879  #define VMA_DEBUG_LOG(format, ...)
    2880  /*
    2881  #define VMA_DEBUG_LOG(format, ...) do { \
    2882  printf(format, __VA_ARGS__); \
    2883  printf("\n"); \
    2884  } while(false)
    2885  */
    2886 #endif
    2887 
    2888 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2889 #if VMA_STATS_STRING_ENABLED
    2890  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2891  {
    2892  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2893  }
    2894  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2895  {
    2896  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2897  }
    2898  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2899  {
    2900  snprintf(outStr, strLen, "%p", ptr);
    2901  }
    2902 #endif
    2903 
    2904 #ifndef VMA_MUTEX
    2905  class VmaMutex
    2906  {
    2907  public:
    2908  VmaMutex() { }
    2909  ~VmaMutex() { }
    2910  void Lock() { m_Mutex.lock(); }
    2911  void Unlock() { m_Mutex.unlock(); }
    2912  private:
    2913  std::mutex m_Mutex;
    2914  };
    2915  #define VMA_MUTEX VmaMutex
    2916 #endif
    2917 
    2918 /*
    2919 If providing your own implementation, you need to implement a subset of std::atomic:
    2920 
    2921 - Constructor(uint32_t desired)
    2922 - uint32_t load() const
    2923 - void store(uint32_t desired)
    2924 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2925 */
    2926 #ifndef VMA_ATOMIC_UINT32
    2927  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2928 #endif
    2929 
    2930 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2931 
    2935  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2936 #endif
    2937 
    2938 #ifndef VMA_DEBUG_ALIGNMENT
    2939 
    2943  #define VMA_DEBUG_ALIGNMENT (1)
    2944 #endif
    2945 
    2946 #ifndef VMA_DEBUG_MARGIN
    2947 
    2951  #define VMA_DEBUG_MARGIN (0)
    2952 #endif
    2953 
    2954 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2955 
    2959  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2960 #endif
    2961 
    2962 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2963 
    2968  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2969 #endif
    2970 
    2971 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2972 
    2976  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2977 #endif
    2978 
    2979 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2980 
    2984  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2985 #endif
    2986 
    2987 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2988  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2990 #endif
    2991 
    2992 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2993  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2995 #endif
    2996 
    2997 #ifndef VMA_CLASS_NO_COPY
    2998  #define VMA_CLASS_NO_COPY(className) \
    2999  private: \
    3000  className(const className&) = delete; \
    3001  className& operator=(const className&) = delete;
    3002 #endif
    3003 
    3004 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3005 
    3006 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3007 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3008 
    3009 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3010 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3011 
    3012 /*******************************************************************************
    3013 END OF CONFIGURATION
    3014 */
    3015 
    3016 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3017  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3018 
    3019 // Returns number of bits set to 1 in (v).
    3020 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3021 {
    3022  uint32_t c = v - ((v >> 1) & 0x55555555);
    3023  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3024  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3025  c = ((c >> 8) + c) & 0x00FF00FF;
    3026  c = ((c >> 16) + c) & 0x0000FFFF;
    3027  return c;
    3028 }
    3029 
    3030 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3031 // Use types like uint32_t, uint64_t as T.
    3032 template <typename T>
    3033 static inline T VmaAlignUp(T val, T align)
    3034 {
    3035  return (val + align - 1) / align * align;
    3036 }
    3037 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3038 // Use types like uint32_t, uint64_t as T.
    3039 template <typename T>
    3040 static inline T VmaAlignDown(T val, T align)
    3041 {
    3042  return val / align * align;
    3043 }
    3044 
    3045 // Division with mathematical rounding to nearest number.
    3046 template <typename T>
    3047 static inline T VmaRoundDiv(T x, T y)
    3048 {
    3049  return (x + (y / (T)2)) / y;
    3050 }
    3051 
    3052 /*
    3053 Returns true if given number is a power of two.
    3054 T must be unsigned integer number or signed integer but always nonnegative.
    3055 For 0 returns true.
    3056 */
    3057 template <typename T>
    3058 inline bool VmaIsPow2(T x)
    3059 {
    3060  return (x & (x-1)) == 0;
    3061 }
    3062 
    3063 // Returns smallest power of 2 greater or equal to v.
    3064 static inline uint32_t VmaNextPow2(uint32_t v)
    3065 {
    3066  v--;
    3067  v |= v >> 1;
    3068  v |= v >> 2;
    3069  v |= v >> 4;
    3070  v |= v >> 8;
    3071  v |= v >> 16;
    3072  v++;
    3073  return v;
    3074 }
    3075 static inline uint64_t VmaNextPow2(uint64_t v)
    3076 {
    3077  v--;
    3078  v |= v >> 1;
    3079  v |= v >> 2;
    3080  v |= v >> 4;
    3081  v |= v >> 8;
    3082  v |= v >> 16;
    3083  v |= v >> 32;
    3084  v++;
    3085  return v;
    3086 }
    3087 
    3088 // Returns largest power of 2 less or equal to v.
    3089 static inline uint32_t VmaPrevPow2(uint32_t v)
    3090 {
    3091  v |= v >> 1;
    3092  v |= v >> 2;
    3093  v |= v >> 4;
    3094  v |= v >> 8;
    3095  v |= v >> 16;
    3096  v = v ^ (v >> 1);
    3097  return v;
    3098 }
    3099 static inline uint64_t VmaPrevPow2(uint64_t v)
    3100 {
    3101  v |= v >> 1;
    3102  v |= v >> 2;
    3103  v |= v >> 4;
    3104  v |= v >> 8;
    3105  v |= v >> 16;
    3106  v |= v >> 32;
    3107  v = v ^ (v >> 1);
    3108  return v;
    3109 }
    3110 
    3111 static inline bool VmaStrIsEmpty(const char* pStr)
    3112 {
    3113  return pStr == VMA_NULL || *pStr == '\0';
    3114 }
    3115 
    3116 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3117 {
    3118  switch(algorithm)
    3119  {
    3121  return "Linear";
    3123  return "Buddy";
    3124  case 0:
    3125  return "Default";
    3126  default:
    3127  VMA_ASSERT(0);
    3128  return "";
    3129  }
    3130 }
    3131 
    3132 #ifndef VMA_SORT
    3133 
    3134 template<typename Iterator, typename Compare>
    3135 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3136 {
    3137  Iterator centerValue = end; --centerValue;
    3138  Iterator insertIndex = beg;
    3139  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3140  {
    3141  if(cmp(*memTypeIndex, *centerValue))
    3142  {
    3143  if(insertIndex != memTypeIndex)
    3144  {
    3145  VMA_SWAP(*memTypeIndex, *insertIndex);
    3146  }
    3147  ++insertIndex;
    3148  }
    3149  }
    3150  if(insertIndex != centerValue)
    3151  {
    3152  VMA_SWAP(*insertIndex, *centerValue);
    3153  }
    3154  return insertIndex;
    3155 }
    3156 
    3157 template<typename Iterator, typename Compare>
    3158 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3159 {
    3160  if(beg < end)
    3161  {
    3162  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3163  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3164  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3165  }
    3166 }
    3167 
    3168 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3169 
    3170 #endif // #ifndef VMA_SORT
    3171 
    3172 /*
    3173 Returns true if two memory blocks occupy overlapping pages.
    3174 ResourceA must be in less memory offset than ResourceB.
    3175 
    3176 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3177 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3178 */
    3179 static inline bool VmaBlocksOnSamePage(
    3180  VkDeviceSize resourceAOffset,
    3181  VkDeviceSize resourceASize,
    3182  VkDeviceSize resourceBOffset,
    3183  VkDeviceSize pageSize)
    3184 {
    3185  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3186  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3187  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3188  VkDeviceSize resourceBStart = resourceBOffset;
    3189  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3190  return resourceAEndPage == resourceBStartPage;
    3191 }
    3192 
    3193 enum VmaSuballocationType
    3194 {
    3195  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3196  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3197  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3198  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3199  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3200  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3201  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3202 };
    3203 
    3204 /*
    3205 Returns true if given suballocation types could conflict and must respect
    3206 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3207 or linear image and another one is optimal image. If type is unknown, behave
    3208 conservatively.
    3209 */
    3210 static inline bool VmaIsBufferImageGranularityConflict(
    3211  VmaSuballocationType suballocType1,
    3212  VmaSuballocationType suballocType2)
    3213 {
    3214  if(suballocType1 > suballocType2)
    3215  {
    3216  VMA_SWAP(suballocType1, suballocType2);
    3217  }
    3218 
    3219  switch(suballocType1)
    3220  {
    3221  case VMA_SUBALLOCATION_TYPE_FREE:
    3222  return false;
    3223  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3224  return true;
    3225  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3226  return
    3227  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3228  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3229  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3230  return
    3231  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3232  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3233  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3234  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3235  return
    3236  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3237  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3238  return false;
    3239  default:
    3240  VMA_ASSERT(0);
    3241  return true;
    3242  }
    3243 }
    3244 
    3245 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3246 {
    3247  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3248  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3249  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3250  {
    3251  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3252  }
    3253 }
    3254 
    3255 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3256 {
    3257  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3258  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3259  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3260  {
    3261  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3262  {
    3263  return false;
    3264  }
    3265  }
    3266  return true;
    3267 }
    3268 
    3269 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3270 struct VmaMutexLock
    3271 {
    3272  VMA_CLASS_NO_COPY(VmaMutexLock)
    3273 public:
    3274  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3275  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3276  {
    3277  if(m_pMutex)
    3278  {
    3279  m_pMutex->Lock();
    3280  }
    3281  }
    3282 
    3283  ~VmaMutexLock()
    3284  {
    3285  if(m_pMutex)
    3286  {
    3287  m_pMutex->Unlock();
    3288  }
    3289  }
    3290 
    3291 private:
    3292  VMA_MUTEX* m_pMutex;
    3293 };
    3294 
    3295 #if VMA_DEBUG_GLOBAL_MUTEX
    3296  static VMA_MUTEX gDebugGlobalMutex;
    3297  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3298 #else
    3299  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3300 #endif
    3301 
    3302 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3303 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3304 
    3305 /*
    3306 Performs binary search and returns iterator to first element that is greater or
    3307 equal to (key), according to comparison (cmp).
    3308 
    3309 Cmp should return true if first argument is less than second argument.
    3310 
    3311 Returned value is the found element, if present in the collection or place where
    3312 new element with value (key) should be inserted.
    3313 */
    3314 template <typename CmpLess, typename IterT, typename KeyT>
    3315 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3316 {
    3317  size_t down = 0, up = (end - beg);
    3318  while(down < up)
    3319  {
    3320  const size_t mid = (down + up) / 2;
    3321  if(cmp(*(beg+mid), key))
    3322  {
    3323  down = mid + 1;
    3324  }
    3325  else
    3326  {
    3327  up = mid;
    3328  }
    3329  }
    3330  return beg + down;
    3331 }
    3332 
    3334 // Memory allocation
    3335 
    3336 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3337 {
    3338  if((pAllocationCallbacks != VMA_NULL) &&
    3339  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3340  {
    3341  return (*pAllocationCallbacks->pfnAllocation)(
    3342  pAllocationCallbacks->pUserData,
    3343  size,
    3344  alignment,
    3345  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3346  }
    3347  else
    3348  {
    3349  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3350  }
    3351 }
    3352 
    3353 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3354 {
    3355  if((pAllocationCallbacks != VMA_NULL) &&
    3356  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3357  {
    3358  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3359  }
    3360  else
    3361  {
    3362  VMA_SYSTEM_FREE(ptr);
    3363  }
    3364 }
    3365 
    3366 template<typename T>
    3367 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3368 {
    3369  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3370 }
    3371 
    3372 template<typename T>
    3373 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3374 {
    3375  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3376 }
    3377 
// Placement-new helpers: allocate storage through the allocation callbacks,
// then value-initialize the object(s) in place. Counterparts of vma_delete /
// vma_delete_array below.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3381 
// Destroys *ptr and releases its storage through the allocation callbacks.
// ptr must come from vma_new with the same callbacks; no null check is done.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3388 
    3389 template<typename T>
    3390 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3391 {
    3392  if(ptr != VMA_NULL)
    3393  {
    3394  for(size_t i = count; i--; )
    3395  {
    3396  ptr[i].~T();
    3397  }
    3398  VmaFree(pAllocationCallbacks, ptr);
    3399  }
    3400 }
    3401 
// STL-compatible allocator that routes all (de)allocations through
// user-provided VkAllocationCallbacks (or the system default when null).
// Used to plug VMA's allocation policy into VmaVector / VmaList.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor: different element type, same callbacks.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3429 
    3430 #if VMA_USE_STL_VECTOR
    3431 
    3432 #define VmaVector std::vector
    3433 
    3434 template<typename T, typename allocatorT>
    3435 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3436 {
    3437  vec.insert(vec.begin() + index, item);
    3438 }
    3439 
    3440 template<typename T, typename allocatorT>
    3441 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3442 {
    3443  vec.erase(vec.begin() + index);
    3444 }
    3445 
    3446 #else // #if VMA_USE_STL_VECTOR
    3447 
    3448 /* Class with interface compatible with subset of std::vector.
    3449 T must be POD because constructors and destructors are not called and memcpy is
    3450 used for these objects. */
    3451 template<typename T, typename AllocatorT>
    3452 class VmaVector
    3453 {
    3454 public:
    3455  typedef T value_type;
    3456 
    3457  VmaVector(const AllocatorT& allocator) :
    3458  m_Allocator(allocator),
    3459  m_pArray(VMA_NULL),
    3460  m_Count(0),
    3461  m_Capacity(0)
    3462  {
    3463  }
    3464 
    3465  VmaVector(size_t count, const AllocatorT& allocator) :
    3466  m_Allocator(allocator),
    3467  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3468  m_Count(count),
    3469  m_Capacity(count)
    3470  {
    3471  }
    3472 
    3473  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3474  m_Allocator(src.m_Allocator),
    3475  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3476  m_Count(src.m_Count),
    3477  m_Capacity(src.m_Count)
    3478  {
    3479  if(m_Count != 0)
    3480  {
    3481  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3482  }
    3483  }
    3484 
    3485  ~VmaVector()
    3486  {
    3487  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3488  }
    3489 
    3490  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3491  {
    3492  if(&rhs != this)
    3493  {
    3494  resize(rhs.m_Count);
    3495  if(m_Count != 0)
    3496  {
    3497  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3498  }
    3499  }
    3500  return *this;
    3501  }
    3502 
    3503  bool empty() const { return m_Count == 0; }
    3504  size_t size() const { return m_Count; }
    3505  T* data() { return m_pArray; }
    3506  const T* data() const { return m_pArray; }
    3507 
    3508  T& operator[](size_t index)
    3509  {
    3510  VMA_HEAVY_ASSERT(index < m_Count);
    3511  return m_pArray[index];
    3512  }
    3513  const T& operator[](size_t index) const
    3514  {
    3515  VMA_HEAVY_ASSERT(index < m_Count);
    3516  return m_pArray[index];
    3517  }
    3518 
    3519  T& front()
    3520  {
    3521  VMA_HEAVY_ASSERT(m_Count > 0);
    3522  return m_pArray[0];
    3523  }
    3524  const T& front() const
    3525  {
    3526  VMA_HEAVY_ASSERT(m_Count > 0);
    3527  return m_pArray[0];
    3528  }
    3529  T& back()
    3530  {
    3531  VMA_HEAVY_ASSERT(m_Count > 0);
    3532  return m_pArray[m_Count - 1];
    3533  }
    3534  const T& back() const
    3535  {
    3536  VMA_HEAVY_ASSERT(m_Count > 0);
    3537  return m_pArray[m_Count - 1];
    3538  }
    3539 
    3540  void reserve(size_t newCapacity, bool freeMemory = false)
    3541  {
    3542  newCapacity = VMA_MAX(newCapacity, m_Count);
    3543 
    3544  if((newCapacity < m_Capacity) && !freeMemory)
    3545  {
    3546  newCapacity = m_Capacity;
    3547  }
    3548 
    3549  if(newCapacity != m_Capacity)
    3550  {
    3551  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3552  if(m_Count != 0)
    3553  {
    3554  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3555  }
    3556  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3557  m_Capacity = newCapacity;
    3558  m_pArray = newArray;
    3559  }
    3560  }
    3561 
    3562  void resize(size_t newCount, bool freeMemory = false)
    3563  {
    3564  size_t newCapacity = m_Capacity;
    3565  if(newCount > m_Capacity)
    3566  {
    3567  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3568  }
    3569  else if(freeMemory)
    3570  {
    3571  newCapacity = newCount;
    3572  }
    3573 
    3574  if(newCapacity != m_Capacity)
    3575  {
    3576  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3577  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3578  if(elementsToCopy != 0)
    3579  {
    3580  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3581  }
    3582  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3583  m_Capacity = newCapacity;
    3584  m_pArray = newArray;
    3585  }
    3586 
    3587  m_Count = newCount;
    3588  }
    3589 
    3590  void clear(bool freeMemory = false)
    3591  {
    3592  resize(0, freeMemory);
    3593  }
    3594 
    3595  void insert(size_t index, const T& src)
    3596  {
    3597  VMA_HEAVY_ASSERT(index <= m_Count);
    3598  const size_t oldCount = size();
    3599  resize(oldCount + 1);
    3600  if(index < oldCount)
    3601  {
    3602  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3603  }
    3604  m_pArray[index] = src;
    3605  }
    3606 
    3607  void remove(size_t index)
    3608  {
    3609  VMA_HEAVY_ASSERT(index < m_Count);
    3610  const size_t oldCount = size();
    3611  if(index < oldCount - 1)
    3612  {
    3613  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3614  }
    3615  resize(oldCount - 1);
    3616  }
    3617 
    3618  void push_back(const T& src)
    3619  {
    3620  const size_t newIndex = size();
    3621  resize(newIndex + 1);
    3622  m_pArray[newIndex] = src;
    3623  }
    3624 
    3625  void pop_back()
    3626  {
    3627  VMA_HEAVY_ASSERT(m_Count > 0);
    3628  resize(size() - 1);
    3629  }
    3630 
    3631  void push_front(const T& src)
    3632  {
    3633  insert(0, src);
    3634  }
    3635 
    3636  void pop_front()
    3637  {
    3638  VMA_HEAVY_ASSERT(m_Count > 0);
    3639  remove(0);
    3640  }
    3641 
    3642  typedef T* iterator;
    3643 
    3644  iterator begin() { return m_pArray; }
    3645  iterator end() { return m_pArray + m_Count; }
    3646 
    3647 private:
    3648  AllocatorT m_Allocator;
    3649  T* m_pArray;
    3650  size_t m_Count;
    3651  size_t m_Capacity;
    3652 };
    3653 
// Inserts item at position index. VmaVector flavor, mirroring the std::vector
// overload above so callers work with either container.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3659 
// Removes the element at position index. VmaVector flavor, mirroring the
// std::vector overload above.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3665 
    3666 #endif // #if VMA_USE_STL_VECTOR
    3667 
    3668 template<typename CmpLess, typename VectorT>
    3669 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3670 {
    3671  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3672  vector.data(),
    3673  vector.data() + vector.size(),
    3674  value,
    3675  CmpLess()) - vector.data();
    3676  VmaVectorInsert(vector, indexToInsert, value);
    3677  return indexToInsert;
    3678 }
    3679 
    3680 template<typename CmpLess, typename VectorT>
    3681 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3682 {
    3683  CmpLess comparator;
    3684  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3685  vector.begin(),
    3686  vector.end(),
    3687  value,
    3688  comparator);
    3689  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3690  {
    3691  size_t indexToRemove = it - vector.begin();
    3692  VmaVectorRemove(vector, indexToRemove);
    3693  return true;
    3694  }
    3695  return false;
    3696 }
    3697 
    3698 template<typename CmpLess, typename IterT, typename KeyT>
    3699 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3700 {
    3701  CmpLess comparator;
    3702  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3703  beg, end, value, comparator);
    3704  if(it == end ||
    3705  (!comparator(*it, value) && !comparator(value, *it)))
    3706  {
    3707  return it;
    3708  }
    3709  return end;
    3710 }
    3711 
    3713 // class VmaPoolAllocator
    3714 
    3715 /*
    3716 Allocator for objects of type T using a list of arrays (pools) to speed up
    3717 allocation. Number of elements that can be allocated is not bounded because
    3718 allocator can create multiple blocks.
    3719 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; any outstanding items become invalid.
    void Clear();
    // Returns storage for one T. NOTE: T's constructor is not run here.
    T* Alloc();
    // Returns ptr's slot to the pool. Asserts if ptr is not from this pool.
    void Free(T* ptr);

private:
    // Each slot is either live (Value) or a link in the block's free list
    // (NextFreeIndex) - hence the union.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One fixed-size array of items plus the head of its free list
    // (UINT32_MAX means the block is full).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3750 
// Creates an empty pool; blocks are allocated lazily on first Alloc().
// itemsPerBlock must be > 0 (checked by assertion).
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3759 
// Releases all blocks owned by the pool.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3765 
    3766 template<typename T>
    3767 void VmaPoolAllocator<T>::Clear()
    3768 {
    3769  for(size_t i = m_ItemBlocks.size(); i--; )
    3770  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3771  m_ItemBlocks.clear();
    3772 }
    3773 
    3774 template<typename T>
    3775 T* VmaPoolAllocator<T>::Alloc()
    3776 {
    3777  for(size_t i = m_ItemBlocks.size(); i--; )
    3778  {
    3779  ItemBlock& block = m_ItemBlocks[i];
    3780  // This block has some free items: Use first one.
    3781  if(block.FirstFreeIndex != UINT32_MAX)
    3782  {
    3783  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3784  block.FirstFreeIndex = pItem->NextFreeIndex;
    3785  return &pItem->Value;
    3786  }
    3787  }
    3788 
    3789  // No block has free item: Create new one and use it.
    3790  ItemBlock& newBlock = CreateNewBlock();
    3791  Item* const pItem = &newBlock.pItems[0];
    3792  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3793  return &pItem->Value;
    3794 }
    3795 
// Returns ptr's slot to the pool by pushing it onto the free list of the
// block that contains it. Asserts if ptr was not allocated from this pool.
// NOTE: the caller is responsible for destroying *ptr first; this only
// recycles storage.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union. memcpy is used instead of a direct cast to avoid
        // aliasing issues when reinterpreting T* as Item*.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the slot onto this block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    3819 
    3820 template<typename T>
    3821 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3822 {
    3823  ItemBlock newBlock = {
    3824  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3825 
    3826  m_ItemBlocks.push_back(newBlock);
    3827 
    3828  // Setup singly-linked list of all free items in this block.
    3829  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3830  newBlock.pItems[i].NextFreeIndex = i + 1;
    3831  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3832  return m_ItemBlocks.back();
    3833 }
    3834 
    3836 // class VmaRawList, VmaList
    3837 
    3838 #if VMA_USE_STL_LIST
    3839 
    3840 #define VmaList std::list
    3841 
    3842 #else // #if VMA_USE_STL_LIST
    3843 
// Node of VmaRawList: doubly-linked, holding the payload by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    3851 
// Doubly linked list operating on raw VmaListItem nodes. Nodes come from an
// internal VmaPoolAllocator; payloads are PODs (no ctor/dtor calls). Wrapped
// by VmaList below for an STL-like iterator interface.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push/insert overloads without a value leave Value uninitialized;
    // the caller fills it in afterwards.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    3896 
// Creates an empty list whose nodes are pooled in blocks of 128 items.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3906 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's own destructor releases all node storage at once.
}
    3913 
    3914 template<typename T>
    3915 void VmaRawList<T>::Clear()
    3916 {
    3917  if(IsEmpty() == false)
    3918  {
    3919  ItemType* pItem = m_pBack;
    3920  while(pItem != VMA_NULL)
    3921  {
    3922  ItemType* const pPrevItem = pItem->pPrev;
    3923  m_ItemAllocator.Free(pItem);
    3924  pItem = pPrevItem;
    3925  }
    3926  m_pFront = VMA_NULL;
    3927  m_pBack = VMA_NULL;
    3928  m_Count = 0;
    3929  }
    3930 }
    3931 
    3932 template<typename T>
    3933 VmaListItem<T>* VmaRawList<T>::PushBack()
    3934 {
    3935  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3936  pNewItem->pNext = VMA_NULL;
    3937  if(IsEmpty())
    3938  {
    3939  pNewItem->pPrev = VMA_NULL;
    3940  m_pFront = pNewItem;
    3941  m_pBack = pNewItem;
    3942  m_Count = 1;
    3943  }
    3944  else
    3945  {
    3946  pNewItem->pPrev = m_pBack;
    3947  m_pBack->pNext = pNewItem;
    3948  m_pBack = pNewItem;
    3949  ++m_Count;
    3950  }
    3951  return pNewItem;
    3952 }
    3953 
    3954 template<typename T>
    3955 VmaListItem<T>* VmaRawList<T>::PushFront()
    3956 {
    3957  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3958  pNewItem->pPrev = VMA_NULL;
    3959  if(IsEmpty())
    3960  {
    3961  pNewItem->pNext = VMA_NULL;
    3962  m_pFront = pNewItem;
    3963  m_pBack = pNewItem;
    3964  m_Count = 1;
    3965  }
    3966  else
    3967  {
    3968  pNewItem->pNext = m_pFront;
    3969  m_pFront->pPrev = pNewItem;
    3970  m_pFront = pNewItem;
    3971  ++m_Count;
    3972  }
    3973  return pNewItem;
    3974 }
    3975 
// Appends a copy of value and returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    3983 
// Prepends a copy of value and returns the new node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    3991 
    3992 template<typename T>
    3993 void VmaRawList<T>::PopBack()
    3994 {
    3995  VMA_HEAVY_ASSERT(m_Count > 0);
    3996  ItemType* const pBackItem = m_pBack;
    3997  ItemType* const pPrevItem = pBackItem->pPrev;
    3998  if(pPrevItem != VMA_NULL)
    3999  {
    4000  pPrevItem->pNext = VMA_NULL;
    4001  }
    4002  m_pBack = pPrevItem;
    4003  m_ItemAllocator.Free(pBackItem);
    4004  --m_Count;
    4005 }
    4006 
    4007 template<typename T>
    4008 void VmaRawList<T>::PopFront()
    4009 {
    4010  VMA_HEAVY_ASSERT(m_Count > 0);
    4011  ItemType* const pFrontItem = m_pFront;
    4012  ItemType* const pNextItem = pFrontItem->pNext;
    4013  if(pNextItem != VMA_NULL)
    4014  {
    4015  pNextItem->pPrev = VMA_NULL;
    4016  }
    4017  m_pFront = pNextItem;
    4018  m_ItemAllocator.Free(pFrontItem);
    4019  --m_Count;
    4020 }
    4021 
    4022 template<typename T>
    4023 void VmaRawList<T>::Remove(ItemType* pItem)
    4024 {
    4025  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4026  VMA_HEAVY_ASSERT(m_Count > 0);
    4027 
    4028  if(pItem->pPrev != VMA_NULL)
    4029  {
    4030  pItem->pPrev->pNext = pItem->pNext;
    4031  }
    4032  else
    4033  {
    4034  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4035  m_pFront = pItem->pNext;
    4036  }
    4037 
    4038  if(pItem->pNext != VMA_NULL)
    4039  {
    4040  pItem->pNext->pPrev = pItem->pPrev;
    4041  }
    4042  else
    4043  {
    4044  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4045  m_pBack = pItem->pPrev;
    4046  }
    4047 
    4048  m_ItemAllocator.Free(pItem);
    4049  --m_Count;
    4050 }
    4051 
    4052 template<typename T>
    4053 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4054 {
    4055  if(pItem != VMA_NULL)
    4056  {
    4057  ItemType* const prevItem = pItem->pPrev;
    4058  ItemType* const newItem = m_ItemAllocator.Alloc();
    4059  newItem->pPrev = prevItem;
    4060  newItem->pNext = pItem;
    4061  pItem->pPrev = newItem;
    4062  if(prevItem != VMA_NULL)
    4063  {
    4064  prevItem->pNext = newItem;
    4065  }
    4066  else
    4067  {
    4068  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4069  m_pFront = newItem;
    4070  }
    4071  ++m_Count;
    4072  return newItem;
    4073  }
    4074  else
    4075  return PushBack();
    4076 }
    4077 
    4078 template<typename T>
    4079 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4080 {
    4081  if(pItem != VMA_NULL)
    4082  {
    4083  ItemType* const nextItem = pItem->pNext;
    4084  ItemType* const newItem = m_ItemAllocator.Alloc();
    4085  newItem->pNext = nextItem;
    4086  newItem->pPrev = pItem;
    4087  pItem->pNext = newItem;
    4088  if(nextItem != VMA_NULL)
    4089  {
    4090  nextItem->pPrev = newItem;
    4091  }
    4092  else
    4093  {
    4094  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4095  m_pBack = newItem;
    4096  }
    4097  ++m_Count;
    4098  return newItem;
    4099  }
    4100  else
    4101  return PushFront();
    4102 }
    4103 
// Inserts a copy of value before pItem (null pItem appends) and returns the node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    4111 
// Inserts a copy of value after pItem (null pItem prepends) and returns the node.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4119 
// std::list-like wrapper over VmaRawList adding STL-style (const_)iterators.
// AllocatorT must expose m_pCallbacks (VmaStlAllocator). A null m_pItem in an
// iterator represents end(); decrementing end() yields the last element.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): step to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            // Comparing iterators from different lists is undefined (asserted).
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Only VmaList may construct non-default iterators.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Implicit conversion from iterator, as with standard containers.
        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): step to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before it (end() appends), returning an iterator to it.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4304 
    4305 #endif // #if VMA_USE_STL_LIST
    4306 
    4308 // class VmaMap
    4309 
    4310 // Unused in this version.
    4311 #if 0
    4312 
    4313 #if VMA_USE_STL_UNORDERED_MAP
    4314 
    4315 #define VmaPair std::pair
    4316 
    4317 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4318  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4319 
    4320 #else // #if VMA_USE_STL_UNORDERED_MAP
    4321 
// POD replacement for std::pair, storable in VmaVector.
// (Inside #if 0 - unused in this version.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4331 
    4332 /* Class compatible with subset of interface of std::unordered_map.
    4333 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4334 */
// Sorted-vector map (subset of std::unordered_map's interface), kept ordered
// by key via binary search. (Inside #if 0 - unused in this version.)
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the backing vector; they are
    // invalidated by insert/erase.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4354 
    4355 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4356 
// Orders VmaPairs by their first member; the second overload allows
// comparing a pair directly against a bare key in binary search.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4369 
// Inserts pair at the position that keeps the vector sorted by key.
// Duplicate keys are not rejected here.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4380 
// Binary-searches for key; returns an iterator to the matching pair or end().
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    // Lower bound only guarantees "not less"; confirm an exact key match.
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4398 
    4399 template<typename KeyT, typename ValueT>
    4400 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4401 {
    4402  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4403 }
    4404 
    4405 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4406 
    4407 #endif // #if 0
    4408 
    4410 
class VmaDeviceMemoryBlock;

// Kind of cache-maintenance operation to perform on a range of mapped memory:
// flush (push host writes) or invalidate (pull device writes).
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4414 
/*
Internal representation of a single allocation (public handle: VmaAllocation).
An object starts in ALLOCATION_TYPE_NONE state and must be initialized with
exactly one of InitBlockAllocation / InitLost / InitDedicatedAllocation.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit set in m_MapCount when the allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when m_pUserData is treated as a string owned by this allocation.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,       // Not initialized yet.
        ALLOCATION_TYPE_BLOCK,      // Suballocated from a VmaDeviceMemoryBlock (m_BlockAllocation active).
        ALLOCATION_TYPE_DEDICATED,  // Owns its own VkDeviceMemory (m_DedicatedAllocation active).
    };

    // userDataString: when true, SetUserData interprets pUserData as a
    // null-terminated string to be copied and owned (FLAG_USER_DATA_STRING).
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this object as a suballocation placed inside `block` at
    // `offset`. Must be called exactly once, while still ALLOCATION_TYPE_NONE.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as a block allocation that is already lost:
    // no pool, no block, and m_LastUseFrameIndex must already equal
    // VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves an existing block allocation to another block/offset
    // (used by defragmentation).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; may fail spuriously
    // (compare_exchange_weak), so call in a retry loop.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with the statistics of this single dedicated allocation
    // (one block, one allocation, no unused ranges).
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap entry points, dispatched by allocation type.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4631 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset of this region from the beginning of the block.
    VkDeviceSize size;   // Size of this region in bytes.
    VmaAllocation hAllocation; // Allocation occupying this region; presumably null for a free region — confirm against metadata code.
    VmaSuballocationType type; // Kind of content placed here (or free).
};
    4643 
    4644 // Comparator for offsets.
    4645 struct VmaSuballocationOffsetLess
    4646 {
    4647  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4648  {
    4649  return lhs.offset < rhs.offset;
    4650  }
    4651 };
    4652 struct VmaSuballocationOffsetGreater
    4653 {
    4654  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4655  {
    4656  return lhs.offset > rhs.offset;
    4657  }
    4658 };
    4659 
// Sequence of suballocations covering a whole device memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4664 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Opaque per-algorithm data attached to the request.

    // Heuristic cost of fulfilling this request: bytes sacrificed from lost
    // allocations, plus a fixed per-lost-allocation penalty
    // (VMA_LOST_ALLOCATION_COST). Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4692 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete strategies are VmaBlockMetadata_Generic,
VmaBlockMetadata_Linear and VmaBlockMetadata_Buddy below.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Must be called once after construction, before any other method.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations that a prior CreateAllocationRequest marked
    // in pAllocationRequest as needing to be lost.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // pBlockData: pointer to the mapped contents of the block, used to check
    // corruption-detection margins. NOTE(review): exact margin scheme is
    // implemented elsewhere - see the definitions of the overrides.
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers shared by PrintDetailedMap implementations of derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4778 
// Used inside Validate() implementations: if `cond` does not hold, asserts
// (in debug builds) and makes the enclosing function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4783 
// Default metadata implementation: keeps all suballocations (used and free)
// in a list sorted by offset, plus a by-size index of larger free regions
// for fast best/first-fit searches.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Used allocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    // Number of FREE suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Total bytes currently free in this block.
    VkDeviceSize m_SumFreeSize;
    // All suballocations, used and free, covering the whole block.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4874 
    4875 /*
    4876 Allocations and their references in internal data structure look like this:
    4877 
    4878 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4879 
    4880  0 +-------+
    4881  | |
    4882  | |
    4883  | |
    4884  +-------+
    4885  | Alloc | 1st[m_1stNullItemsBeginCount]
    4886  +-------+
    4887  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4888  +-------+
    4889  | ... |
    4890  +-------+
    4891  | Alloc | 1st[1st.size() - 1]
    4892  +-------+
    4893  | |
    4894  | |
    4895  | |
    4896 GetSize() +-------+
    4897 
    4898 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4899 
    4900  0 +-------+
    4901  | Alloc | 2nd[0]
    4902  +-------+
    4903  | Alloc | 2nd[1]
    4904  +-------+
    4905  | ... |
    4906  +-------+
    4907  | Alloc | 2nd[2nd.size() - 1]
    4908  +-------+
    4909  | |
    4910  | |
    4911  | |
    4912  +-------+
    4913  | Alloc | 1st[m_1stNullItemsBeginCount]
    4914  +-------+
    4915  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4916  +-------+
    4917  | ... |
    4918  +-------+
    4919  | Alloc | 1st[1st.size() - 1]
    4920  +-------+
    4921  | |
    4922 GetSize() +-------+
    4923 
    4924 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4925 
    4926  0 +-------+
    4927  | |
    4928  | |
    4929  | |
    4930  +-------+
    4931  | Alloc | 1st[m_1stNullItemsBeginCount]
    4932  +-------+
    4933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4934  +-------+
    4935  | ... |
    4936  +-------+
    4937  | Alloc | 1st[1st.size() - 1]
    4938  +-------+
    4939  | |
    4940  | |
    4941  | |
    4942  +-------+
    4943  | Alloc | 2nd[2nd.size() - 1]
    4944  +-------+
    4945  | ... |
    4946  +-------+
    4947  | Alloc | 2nd[1]
    4948  +-------+
    4949  | Alloc | 2nd[0]
    4950 GetSize() +-------+
    4951 
    4952 */
// Linear-algorithm metadata: suballocations are kept in two offset-ordered
// vectors used as a ring buffer or a double stack (see the diagram above).
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    // The two physical vectors; which is "1st" is decided by m_1stVectorIndex.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors that resolve the current 1st/2nd roles of the two vectors.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5051 
    5052 /*
    5053 - GetSize() is the original size of allocated memory block.
    5054 - m_UsableSize is this size aligned down to a power of two.
    5055  All allocations and calculations happen relative to m_UsableSize.
    5056 - GetUnusableSize() is the difference between them.
    5057  It is reported as a separate, unused range, not available for allocations.
    5058 
    5059 Node at level 0 has size = m_UsableSize.
    5060 Each next level contains nodes with size 2 times smaller than current level.
    5061 m_LevelCount is the maximum number of levels to use in the current object.
    5062 */
// Buddy-allocator metadata: a binary tree of power-of-two nodes over
// m_UsableSize, with a per-level free list (see the description above).
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Reported free size includes the tail that is unusable by this algorithm.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the whole tree is collapsed into a single free root node.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled by ValidateNode() and checked by Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree. Leaves are FREE or ALLOCATION; inner nodes
    // are SPLIT. The union member in use is determined by `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy; // The sibling node covering the other half of the parent.

        union
        {
            struct
            {
                // Intrusive links inside m_FreeList at this node's level.
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                // Right child is reachable as leftChild->buddy.
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked list of FREE nodes, one list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5199 
    5200 /*
    5201 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5202 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5203 
    5204 Thread-safety: This class must be externally synchronized.
    5205 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Metadata describing the suballocations inside this block.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // By destruction time the block must be fully unmapped and its
        // VkDeviceMemory released (see Destroy(), which must be called first).
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Map/Unmap are reference-counted via `count` — see m_MapCount below.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    // Map reference count — checked to be 0 in the destructor.
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5268 
    5269 struct VmaPointerLess
    5270 {
    5271  bool operator()(const void* lhs, const void* rhs) const
    5272  {
    5273  return lhs < rhs;
    5274  }
    5275 };
    5276 
    5277 class VmaDefragmentator;
    5278 
    5279 /*
    5280 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5281 Vulkan memory type.
    5282 
    5283 Synchronized internally with a mutex.
    5284 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    // Lazily created by EnsureDefragmentator, released by DestroyDefragmentator.
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5397 
// Custom memory pool: a user-created pool wrapping its own block vector.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned exactly once (asserted to still be 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5420 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation, within limits on bytes and allocation count moved.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals reported via GetBytesMoved / GetAllocationsMoved.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        // Optional out-flag set when this allocation was moved.
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations by size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when not all of its allocations
        // were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE: name has a typo ("Descecnding") kept for interface compatibility.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo entries (and mixed BlockInfo/block lookups) by block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged is optional.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5550 
    5551 #if VMA_RECORDING_ENABLED
    5552 
// Writes a trace of allocator API calls to a file (m_File) so a session can be
// inspected or replayed later. Only compiled when VMA_RECORDING_ENABLED.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Per-call context written with each record entry.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats user data (pointer or string) for output.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        // Buffer for the pointer-formatted representation; m_Str may point here.
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    // Serializes writes to m_File when m_UseMutex is set.
    VMA_MUTEX m_FileMutex;
    // Timer calibration for timestamps in CallParams.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5648 
    5649 #endif // #if VMA_RECORDING_ENABLED
    5650 
    5651 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user callbacks if specified at creation, else null (use defaults).
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device granularity, clamped up by the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5848 
    5850 // Memory allocation #2 after VmaAllocator_T definition
    5851 
    5852 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5853 {
    5854  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5855 }
    5856 
    5857 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5858 {
    5859  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5860 }
    5861 
    5862 template<typename T>
    5863 static T* VmaAllocate(VmaAllocator hAllocator)
    5864 {
    5865  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5866 }
    5867 
    5868 template<typename T>
    5869 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5870 {
    5871  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5872 }
    5873 
    5874 template<typename T>
    5875 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5876 {
    5877  if(ptr != VMA_NULL)
    5878  {
    5879  ptr->~T();
    5880  VmaFree(hAllocator, ptr);
    5881  }
    5882 }
    5883 
    5884 template<typename T>
    5885 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5886 {
    5887  if(ptr != VMA_NULL)
    5888  {
    5889  for(size_t i = count; i--; )
    5890  ptr[i].~T();
    5891  VmaFree(hAllocator, ptr);
    5892  }
    5893 }
    5894 
    5896 // VmaStringBuilder
    5897 
    5898 #if VMA_STATS_STRING_ENABLED
    5899 
// Growable character buffer with append helpers, used to build the stats
// string. Backed by VmaVector so it uses the allocator's callbacks.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // NOTE: data is NOT null-terminated; use GetLength().
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5917 
    5918 void VmaStringBuilder::Add(const char* pStr)
    5919 {
    5920  const size_t strLen = strlen(pStr);
    5921  if(strLen > 0)
    5922  {
    5923  const size_t oldCount = m_Data.size();
    5924  m_Data.resize(oldCount + strLen);
    5925  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5926  }
    5927 }
    5928 
    5929 void VmaStringBuilder::AddNumber(uint32_t num)
    5930 {
    5931  char buf[11];
    5932  VmaUint32ToStr(buf, sizeof(buf), num);
    5933  Add(buf);
    5934 }
    5935 
    5936 void VmaStringBuilder::AddNumber(uint64_t num)
    5937 {
    5938  char buf[21];
    5939  VmaUint64ToStr(buf, sizeof(buf), num);
    5940  Add(buf);
    5941 }
    5942 
    5943 void VmaStringBuilder::AddPointer(const void* ptr)
    5944 {
    5945  char buf[21];
    5946  VmaPtrToStr(buf, sizeof(buf), ptr);
    5947  Add(buf);
    5948 }
    5949 
    5950 #endif // #if VMA_STATS_STRING_ENABLED
    5951 
    5953 // VmaJsonWriter
    5954 
    5955 #if VMA_STATS_STRING_ENABLED
    5956 
// Streams well-formed JSON into a VmaStringBuilder. Nesting is tracked on
// m_Stack; Begin*/End* calls must be balanced (checked with VMA_ASSERT).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted string; equivalent to BeginString(pStr) + EndString().
    void WriteString(const char* pStr);
    // BeginString/ContinueString/EndString allow building one string value
    // from multiple pieces; Continue* may only be called between them.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One frame per currently-open object or array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values emitted so far in this collection.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6005 
    6006 const char* const VmaJsonWriter::INDENT = " ";
    6007 
// Writer starts at top level (empty stack) and outside any string value.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6014 
VmaJsonWriter::~VmaJsonWriter()
{
    // All strings and collections must have been closed by now.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6020 
    6021 void VmaJsonWriter::BeginObject(bool singleLine)
    6022 {
    6023  VMA_ASSERT(!m_InsideString);
    6024 
    6025  BeginValue(false);
    6026  m_SB.Add('{');
    6027 
    6028  StackItem item;
    6029  item.type = COLLECTION_TYPE_OBJECT;
    6030  item.valueCount = 0;
    6031  item.singleLineMode = singleLine;
    6032  m_Stack.push_back(item);
    6033 }
    6034 
// Closes the innermost collection, which must be an object.
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing brace is indented one level shallower than the contents.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
    6045 
    6046 void VmaJsonWriter::BeginArray(bool singleLine)
    6047 {
    6048  VMA_ASSERT(!m_InsideString);
    6049 
    6050  BeginValue(false);
    6051  m_SB.Add('[');
    6052 
    6053  StackItem item;
    6054  item.type = COLLECTION_TYPE_ARRAY;
    6055  item.valueCount = 0;
    6056  item.singleLineMode = singleLine;
    6057  m_Stack.push_back(item);
    6058 }
    6059 
// Closes the innermost collection, which must be an array.
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing bracket is indented one level shallower than the contents.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
    6070 
// Writes a complete, quoted and escaped string value in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6076 
    6077 void VmaJsonWriter::BeginString(const char* pStr)
    6078 {
    6079  VMA_ASSERT(!m_InsideString);
    6080 
    6081  BeginValue(true);
    6082  m_SB.Add('"');
    6083  m_InsideString = true;
    6084  if(pStr != VMA_NULL && pStr[0] != '\0')
    6085  {
    6086  ContinueString(pStr);
    6087  }
    6088 }
    6089 
    6090 void VmaJsonWriter::ContinueString(const char* pStr)
    6091 {
    6092  VMA_ASSERT(m_InsideString);
    6093 
    6094  const size_t strLen = strlen(pStr);
    6095  for(size_t i = 0; i < strLen; ++i)
    6096  {
    6097  char ch = pStr[i];
    6098  if(ch == '\\')
    6099  {
    6100  m_SB.Add("\\\\");
    6101  }
    6102  else if(ch == '"')
    6103  {
    6104  m_SB.Add("\\\"");
    6105  }
    6106  else if(ch >= 32)
    6107  {
    6108  m_SB.Add(ch);
    6109  }
    6110  else switch(ch)
    6111  {
    6112  case '\b':
    6113  m_SB.Add("\\b");
    6114  break;
    6115  case '\f':
    6116  m_SB.Add("\\f");
    6117  break;
    6118  case '\n':
    6119  m_SB.Add("\\n");
    6120  break;
    6121  case '\r':
    6122  m_SB.Add("\\r");
    6123  break;
    6124  case '\t':
    6125  m_SB.Add("\\t");
    6126  break;
    6127  default:
    6128  VMA_ASSERT(0 && "Character not currently supported.");
    6129  break;
    6130  }
    6131  }
    6132 }
    6133 
// Appends a decimal number to the string value currently being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6139 
// Appends a decimal number to the string value currently being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6145 
// Appends a pointer value (formatted by VmaStringBuilder::AddPointer)
// to the string value currently being written.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6151 
    6152 void VmaJsonWriter::EndString(const char* pStr)
    6153 {
    6154  VMA_ASSERT(m_InsideString);
    6155  if(pStr != VMA_NULL && pStr[0] != '\0')
    6156  {
    6157  ContinueString(pStr);
    6158  }
    6159  m_SB.Add('"');
    6160  m_InsideString = false;
    6161 }
    6162 
// Writes a complete JSON number value (32-bit unsigned).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6169 
// Writes a complete JSON number value (64-bit unsigned).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6176 
    6177 void VmaJsonWriter::WriteBool(bool b)
    6178 {
    6179  VMA_ASSERT(!m_InsideString);
    6180  BeginValue(false);
    6181  m_SB.Add(b ? "true" : "false");
    6182 }
    6183 
// Writes a complete JSON null value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6190 
// Emits whatever separator/indentation must precede the next value inside the
// current collection, and bumps that collection's element counter:
// - inside an object, elements alternate key/value: even positions must be
//   strings (keys), odd positions are preceded by ": ";
// - between sibling elements a ", " plus fresh indentation is written;
// - the very first element just gets indentation.
// At top level (empty stack) nothing is emitted.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            // Even position inside an object = a key, which must be a string.
            VMA_ASSERT(isString);
        }

        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            // Odd position = the value following its key.
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
    6219 
    6220 void VmaJsonWriter::WriteIndent(bool oneLess)
    6221 {
    6222  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6223  {
    6224  m_SB.AddNewLine();
    6225 
    6226  size_t count = m_Stack.size();
    6227  if(count > 0 && oneLess)
    6228  {
    6229  --count;
    6230  }
    6231  for(size_t i = 0; i < count; ++i)
    6232  {
    6233  m_SB.Add(INDENT);
    6234  }
    6235  }
    6236 }
    6237 
    6238 #endif // #if VMA_STATS_STRING_ENABLED
    6239 
    6241 
    6242 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6243 {
    6244  if(IsUserDataString())
    6245  {
    6246  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6247 
    6248  FreeUserDataString(hAllocator);
    6249 
    6250  if(pUserData != VMA_NULL)
    6251  {
    6252  const char* const newStrSrc = (char*)pUserData;
    6253  const size_t newStrLen = strlen(newStrSrc);
    6254  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6255  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6256  m_pUserData = newStrDst;
    6257  }
    6258  }
    6259  else
    6260  {
    6261  m_pUserData = pUserData;
    6262  }
    6263 }
    6264 
    6265 void VmaAllocation_T::ChangeBlockAllocation(
    6266  VmaAllocator hAllocator,
    6267  VmaDeviceMemoryBlock* block,
    6268  VkDeviceSize offset)
    6269 {
    6270  VMA_ASSERT(block != VMA_NULL);
    6271  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6272 
    6273  // Move mapping reference counter from old block to new block.
    6274  if(block != m_BlockAllocation.m_Block)
    6275  {
    6276  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    6277  if(IsPersistentMap())
    6278  ++mapRefCount;
    6279  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    6280  block->Map(hAllocator, mapRefCount, VMA_NULL);
    6281  }
    6282 
    6283  m_BlockAllocation.m_Block = block;
    6284  m_BlockAllocation.m_Offset = offset;
    6285 }
    6286 
    6287 VkDeviceSize VmaAllocation_T::GetOffset() const
    6288 {
    6289  switch(m_Type)
    6290  {
    6291  case ALLOCATION_TYPE_BLOCK:
    6292  return m_BlockAllocation.m_Offset;
    6293  case ALLOCATION_TYPE_DEDICATED:
    6294  return 0;
    6295  default:
    6296  VMA_ASSERT(0);
    6297  return 0;
    6298  }
    6299 }
    6300 
    6301 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6302 {
    6303  switch(m_Type)
    6304  {
    6305  case ALLOCATION_TYPE_BLOCK:
    6306  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6307  case ALLOCATION_TYPE_DEDICATED:
    6308  return m_DedicatedAllocation.m_hMemory;
    6309  default:
    6310  VMA_ASSERT(0);
    6311  return VK_NULL_HANDLE;
    6312  }
    6313 }
    6314 
    6315 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6316 {
    6317  switch(m_Type)
    6318  {
    6319  case ALLOCATION_TYPE_BLOCK:
    6320  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6321  case ALLOCATION_TYPE_DEDICATED:
    6322  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6323  default:
    6324  VMA_ASSERT(0);
    6325  return UINT32_MAX;
    6326  }
    6327 }
    6328 
    6329 void* VmaAllocation_T::GetMappedData() const
    6330 {
    6331  switch(m_Type)
    6332  {
    6333  case ALLOCATION_TYPE_BLOCK:
    6334  if(m_MapCount != 0)
    6335  {
    6336  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6337  VMA_ASSERT(pBlockData != VMA_NULL);
    6338  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6339  }
    6340  else
    6341  {
    6342  return VMA_NULL;
    6343  }
    6344  break;
    6345  case ALLOCATION_TYPE_DEDICATED:
    6346  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6347  return m_DedicatedAllocation.m_pMappedData;
    6348  default:
    6349  VMA_ASSERT(0);
    6350  return VMA_NULL;
    6351  }
    6352 }
    6353 
    6354 bool VmaAllocation_T::CanBecomeLost() const
    6355 {
    6356  switch(m_Type)
    6357  {
    6358  case ALLOCATION_TYPE_BLOCK:
    6359  return m_BlockAllocation.m_CanBecomeLost;
    6360  case ALLOCATION_TYPE_DEDICATED:
    6361  return false;
    6362  default:
    6363  VMA_ASSERT(0);
    6364  return false;
    6365  }
    6366 }
    6367 
// Returns the custom pool this allocation belongs to.
// Valid only for block allocations (dedicated allocations store no pool).
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6373 
// Attempts to mark this allocation as lost, using a lock-free
// compare-exchange loop on the last-use frame index.
// Returns true if this call transitioned the allocation to lost; false if
// the allocation was used within the last frameInUseCount frames (or was
// concurrently updated to such a state). An already-lost value here is a
// logic error (caller should have checked).
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still considered in use: too recently touched to be sacrificed.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: another thread updated the index — retry with the
            // refreshed value now stored in localLastUseFrameIndex.
        }
    }
}
    6405 
    6406 #if VMA_STATS_STRING_ENABLED
    6407 
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum value when printing JSON stats
// (see VmaAllocation_T::PrintParameters) — keep order in sync with the enum.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6417 
// Writes this allocation's key/value parameters into an already-open JSON
// object: suballocation type, size, optional user data (as text or pointer),
// frame indices, and buffer/image usage flags when known.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned string: print the text itself.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer: print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6453 
    6454 #endif
    6455 
    6456 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6457 {
    6458  VMA_ASSERT(IsUserDataString());
    6459  if(m_pUserData != VMA_NULL)
    6460  {
    6461  char* const oldStr = (char*)m_pUserData;
    6462  const size_t oldStrLen = strlen(oldStr);
    6463  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6464  m_pUserData = VMA_NULL;
    6465  }
    6466 }
    6467 
    6468 void VmaAllocation_T::BlockAllocMap()
    6469 {
    6470  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6471 
    6472  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6473  {
    6474  ++m_MapCount;
    6475  }
    6476  else
    6477  {
    6478  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6479  }
    6480 }
    6481 
    6482 void VmaAllocation_T::BlockAllocUnmap()
    6483 {
    6484  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6485 
    6486  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6487  {
    6488  --m_MapCount;
    6489  }
    6490  else
    6491  {
    6492  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6493  }
    6494 }
    6495 
    6496 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6497 {
    6498  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6499 
    6500  if(m_MapCount != 0)
    6501  {
    6502  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6503  {
    6504  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6505  *ppData = m_DedicatedAllocation.m_pMappedData;
    6506  ++m_MapCount;
    6507  return VK_SUCCESS;
    6508  }
    6509  else
    6510  {
    6511  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6512  return VK_ERROR_MEMORY_MAP_FAILED;
    6513  }
    6514  }
    6515  else
    6516  {
    6517  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6518  hAllocator->m_hDevice,
    6519  m_DedicatedAllocation.m_hMemory,
    6520  0, // offset
    6521  VK_WHOLE_SIZE,
    6522  0, // flags
    6523  ppData);
    6524  if(result == VK_SUCCESS)
    6525  {
    6526  m_DedicatedAllocation.m_pMappedData = *ppData;
    6527  m_MapCount = 1;
    6528  }
    6529  return result;
    6530  }
    6531 }
    6532 
    6533 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6534 {
    6535  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6536 
    6537  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6538  {
    6539  --m_MapCount;
    6540  if(m_MapCount == 0)
    6541  {
    6542  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6543  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6544  hAllocator->m_hDevice,
    6545  m_DedicatedAllocation.m_hMemory);
    6546  }
    6547  }
    6548  else
    6549  {
    6550  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6551  }
    6552 }
    6553 
    6554 #if VMA_STATS_STRING_ENABLED
    6555 
// Serializes one VmaStatInfo as a JSON object: block/allocation/range counts,
// byte totals, and (only when there is more than one sample, so min/avg/max
// are meaningful) the allocation-size and unused-range-size distributions.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true); // single-line nested object
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true); // single-line nested object
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6603 
    6604 #endif // #if VMA_STATS_STRING_ENABLED
    6605 
// Comparator ordering suballocation-list iterators by the size of the
// suballocation they point to. The iterator/size overload enables binary
// search of m_FreeSuballocationsBySize by a plain size key
// (see VmaBinaryFindFirstNotLess usage in CreateAllocationRequest).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6621 
    6622 
    6624 // class VmaBlockMetadata
    6625 
// Base-class constructor: size is set later via Init(); only the allocation
// callbacks are captured from the allocator here.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6631 
    6632 #if VMA_STATS_STRING_ENABLED
    6633 
// Opens the JSON object for one block's detailed map: summary numbers
// followed by the start of the "Suballocations" array. Each entry is then
// emitted via PrintDetailedMap_Allocation / PrintDetailedMap_UnusedRange,
// and the object is closed by PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    // Left open intentionally; closed in PrintDetailedMap_End().
    json.WriteString("Suballocations");
    json.BeginArray();
}
    6656 
// Emits one used suballocation as a single-line JSON object:
// its offset plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6670 
// Emits one free range as a single-line JSON object with type "FREE",
// its offset, and its size.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6688 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6694 
    6695 #endif // #if VMA_STATS_STRING_ENABLED
    6696 
    6698 // class VmaBlockMetadata_Generic
    6699 
// Constructs empty metadata; both containers use the allocator's callbacks.
// Real initialization (the single all-free suballocation) happens in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6708 
// Containers clean up themselves; nothing else to release.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6712 
    6713 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6714 {
    6715  VmaBlockMetadata::Init(size);
    6716 
    6717  m_FreeCount = 1;
    6718  m_SumFreeSize = size;
    6719 
    6720  VmaSuballocation suballoc = {};
    6721  suballoc.offset = 0;
    6722  suballoc.size = size;
    6723  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6724  suballoc.hAllocation = VK_NULL_HANDLE;
    6725 
    6726  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6727  m_Suballocations.push_back(suballoc);
    6728  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6729  --suballocItem;
    6730  m_FreeSuballocationsBySize.push_back(suballocItem);
    6731 }
    6732 
// Exhaustively checks the internal invariants of this block's metadata.
// Returns true when everything is consistent; each VMA_VALIDATE returns
// false on the first violated invariant. Invoked from heavy asserts only.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free ranges carry no allocation handle; used ranges must have one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // Allocation handle must agree with the list's bookkeeping.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6814 
    6815 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6816 {
    6817  if(!m_FreeSuballocationsBySize.empty())
    6818  {
    6819  return m_FreeSuballocationsBySize.back()->size;
    6820  }
    6821  else
    6822  {
    6823  return 0;
    6824  }
    6825 }
    6826 
// The block is empty exactly when it contains a single suballocation and
// that suballocation is free (i.e. the state produced by Init()).
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
    6831 
    6832 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6833 {
    6834  outInfo.blockCount = 1;
    6835 
    6836  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6837  outInfo.allocationCount = rangeCount - m_FreeCount;
    6838  outInfo.unusedRangeCount = m_FreeCount;
    6839 
    6840  outInfo.unusedBytes = m_SumFreeSize;
    6841  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6842 
    6843  outInfo.allocationSizeMin = UINT64_MAX;
    6844  outInfo.allocationSizeMax = 0;
    6845  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6846  outInfo.unusedRangeSizeMax = 0;
    6847 
    6848  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6849  suballocItem != m_Suballocations.cend();
    6850  ++suballocItem)
    6851  {
    6852  const VmaSuballocation& suballoc = *suballocItem;
    6853  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6854  {
    6855  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6856  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6857  }
    6858  else
    6859  {
    6860  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6861  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6862  }
    6863  }
    6864 }
    6865 
// Accumulates this block's contribution into pool-wide statistics.
// All fields are additive except unusedRangeSizeMax, which is a maximum.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
    6876 
    6877 #if VMA_STATS_STRING_ENABLED
    6878 
// Dumps this block's full suballocation map as JSON: a header with totals,
// then one entry per suballocation in address order (used or free).
void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
{
    PrintDetailedMap_Begin(json,
        m_SumFreeSize, // unusedBytes
        m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
        m_FreeCount); // unusedRangeCount

    size_t i = 0;
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem, ++i)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
        }
        else
        {
            PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
        }
    }

    PrintDetailedMap_End(json);
}
    6903 
    6904 #endif // #if VMA_STATS_STRING_ENABLED
    6905 
    6906 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6907  uint32_t currentFrameIndex,
    6908  uint32_t frameInUseCount,
    6909  VkDeviceSize bufferImageGranularity,
    6910  VkDeviceSize allocSize,
    6911  VkDeviceSize allocAlignment,
    6912  bool upperAddress,
    6913  VmaSuballocationType allocType,
    6914  bool canMakeOtherLost,
    6915  uint32_t strategy,
    6916  VmaAllocationRequest* pAllocationRequest)
    6917 {
    6918  VMA_ASSERT(allocSize > 0);
    6919  VMA_ASSERT(!upperAddress);
    6920  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6921  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6922  VMA_HEAVY_ASSERT(Validate());
    6923 
    6924  // There is not enough total free space in this block to fullfill the request: Early return.
    6925  if(canMakeOtherLost == false &&
    6926  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6927  {
    6928  return false;
    6929  }
    6930 
    6931  // New algorithm, efficiently searching freeSuballocationsBySize.
    6932  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6933  if(freeSuballocCount > 0)
    6934  {
    6936  {
    6937  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6938  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6939  m_FreeSuballocationsBySize.data(),
    6940  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6941  allocSize + 2 * VMA_DEBUG_MARGIN,
    6942  VmaSuballocationItemSizeLess());
    6943  size_t index = it - m_FreeSuballocationsBySize.data();
    6944  for(; index < freeSuballocCount; ++index)
    6945  {
    6946  if(CheckAllocation(
    6947  currentFrameIndex,
    6948  frameInUseCount,
    6949  bufferImageGranularity,
    6950  allocSize,
    6951  allocAlignment,
    6952  allocType,
    6953  m_FreeSuballocationsBySize[index],
    6954  false, // canMakeOtherLost
    6955  &pAllocationRequest->offset,
    6956  &pAllocationRequest->itemsToMakeLostCount,
    6957  &pAllocationRequest->sumFreeSize,
    6958  &pAllocationRequest->sumItemSize))
    6959  {
    6960  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6961  return true;
    6962  }
    6963  }
    6964  }
    6965  else // WORST_FIT, FIRST_FIT
    6966  {
    6967  // Search staring from biggest suballocations.
    6968  for(size_t index = freeSuballocCount; index--; )
    6969  {
    6970  if(CheckAllocation(
    6971  currentFrameIndex,
    6972  frameInUseCount,
    6973  bufferImageGranularity,
    6974  allocSize,
    6975  allocAlignment,
    6976  allocType,
    6977  m_FreeSuballocationsBySize[index],
    6978  false, // canMakeOtherLost
    6979  &pAllocationRequest->offset,
    6980  &pAllocationRequest->itemsToMakeLostCount,
    6981  &pAllocationRequest->sumFreeSize,
    6982  &pAllocationRequest->sumItemSize))
    6983  {
    6984  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6985  return true;
    6986  }
    6987  }
    6988  }
    6989  }
    6990 
    6991  if(canMakeOtherLost)
    6992  {
    6993  // Brute-force algorithm. TODO: Come up with something better.
    6994 
    6995  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6996  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6997 
    6998  VmaAllocationRequest tmpAllocRequest = {};
    6999  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7000  suballocIt != m_Suballocations.end();
    7001  ++suballocIt)
    7002  {
    7003  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7004  suballocIt->hAllocation->CanBecomeLost())
    7005  {
    7006  if(CheckAllocation(
    7007  currentFrameIndex,
    7008  frameInUseCount,
    7009  bufferImageGranularity,
    7010  allocSize,
    7011  allocAlignment,
    7012  allocType,
    7013  suballocIt,
    7014  canMakeOtherLost,
    7015  &tmpAllocRequest.offset,
    7016  &tmpAllocRequest.itemsToMakeLostCount,
    7017  &tmpAllocRequest.sumFreeSize,
    7018  &tmpAllocRequest.sumItemSize))
    7019  {
    7020  tmpAllocRequest.item = suballocIt;
    7021 
    7022  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7024  {
    7025  *pAllocationRequest = tmpAllocRequest;
    7026  }
    7027  }
    7028  }
    7029  }
    7030 
    7031  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7032  {
    7033  return true;
    7034  }
    7035  }
    7036 
    7037  return false;
    7038 }
    7039 
// Executes the "make other allocations lost" part of an allocation request
// produced by CreateAllocationRequest with canMakeOtherLost == true:
// starting at pAllocationRequest->item, marks the required number of
// conflicting allocations as lost and frees their ranges. Returns false if
// any of them can no longer be made lost (e.g. used again in the meantime);
// on success, pAllocationRequest->item points at a free suballocation.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free range to reach the next allocation to sacrifice.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation merges adjacent free ranges and returns a
            // valid iterator to the resulting free range.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7071 
    7072 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7073 {
    7074  uint32_t lostAllocationCount = 0;
    7075  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7076  it != m_Suballocations.end();
    7077  ++it)
    7078  {
    7079  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7080  it->hAllocation->CanBecomeLost() &&
    7081  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7082  {
    7083  it = FreeSuballocation(it);
    7084  ++lostAllocationCount;
    7085  }
    7086  }
    7087  return lostAllocationCount;
    7088 }
    7089 
    7090 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7091 {
    7092  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7093  it != m_Suballocations.end();
    7094  ++it)
    7095  {
    7096  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7097  {
    7098  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7099  {
    7100  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7101  return VK_ERROR_VALIDATION_FAILED_EXT;
    7102  }
    7103  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7104  {
    7105  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7106  return VK_ERROR_VALIDATION_FAILED_EXT;
    7107  }
    7108  }
    7109  }
    7110 
    7111  return VK_SUCCESS;
    7112 }
    7113 
// Commits a previously computed allocation request: converts the free
// suballocation referenced by request.item into a used one of allocSize at
// request.offset, splitting off new free suballocations for any leftover
// space before/after, and updates m_FreeCount / m_SumFreeSize.
// upperAddress is not supported by this (generic) metadata implementation.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen before mutating suballoc below, because the
    // size-sorted registry is keyed by the item's current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the consumed free item is gone (-1), each padding split
    // re-adds one free item.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7179 
    7180 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7181 {
    7182  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7183  suballocItem != m_Suballocations.end();
    7184  ++suballocItem)
    7185  {
    7186  VmaSuballocation& suballoc = *suballocItem;
    7187  if(suballoc.hAllocation == allocation)
    7188  {
    7189  FreeSuballocation(suballocItem);
    7190  VMA_HEAVY_ASSERT(Validate());
    7191  return;
    7192  }
    7193  }
    7194  VMA_ASSERT(0 && "Not found!");
    7195 }
    7196 
    7197 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7198 {
    7199  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7200  suballocItem != m_Suballocations.end();
    7201  ++suballocItem)
    7202  {
    7203  VmaSuballocation& suballoc = *suballocItem;
    7204  if(suballoc.offset == offset)
    7205  {
    7206  FreeSuballocation(suballocItem);
    7207  return;
    7208  }
    7209  }
    7210  VMA_ASSERT(0 && "Not found!");
    7211 }
    7212 
    7213 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7214 {
    7215  VkDeviceSize lastSize = 0;
    7216  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7217  {
    7218  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7219 
    7220  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7221  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7222  VMA_VALIDATE(it->size >= lastSize);
    7223  lastSize = it->size;
    7224  }
    7225  return true;
    7226 }
    7227 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at suballocItem. On success fills *pOffset with the final
// aligned offset and returns true. When canMakeOtherLost is set, the search
// may span used suballocations whose allocations can become lost; then
// *itemsToMakeLostCount, *pSumFreeSize and *pSumItemSize report how many
// allocations would be sacrificed and the free/used byte totals in the
// covered range. bufferImageGranularity conflicts with neighboring
// suballocations of incompatible type force extra alignment or failure.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Starting item may be used: it counts only if its allocation can be
        // made lost and is older than frameInUseCount frames.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Push the allocation to the next granularity page.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple case: allocation must fit entirely inside this one free
        // suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7501 
    7502 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7503 {
    7504  VMA_ASSERT(item != m_Suballocations.end());
    7505  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7506 
    7507  VmaSuballocationList::iterator nextItem = item;
    7508  ++nextItem;
    7509  VMA_ASSERT(nextItem != m_Suballocations.end());
    7510  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7511 
    7512  item->size += nextItem->size;
    7513  --m_FreeCount;
    7514  m_Suballocations.erase(nextItem);
    7515 }
    7516 
// Converts the given used suballocation into a free one, merges it with any
// adjacent free neighbors, and registers the resulting free range in the
// size-sorted registry. Returns an iterator to the resulting (possibly
// merged) free suballocation.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Order matters: merge the next neighbor into this item first, so that a
    // subsequent merge into prevItem absorbs the combined range.
    if(mergeWithNext)
    {
        // Neighbor is leaving the list; remove it from the size registry first.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem grows, so it must be re-registered under its new size.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7568 
    7569 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7570 {
    7571  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7572  VMA_ASSERT(item->size > 0);
    7573 
    7574  // You may want to enable this validation at the beginning or at the end of
    7575  // this function, depending on what do you want to check.
    7576  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7577 
    7578  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7579  {
    7580  if(m_FreeSuballocationsBySize.empty())
    7581  {
    7582  m_FreeSuballocationsBySize.push_back(item);
    7583  }
    7584  else
    7585  {
    7586  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7587  }
    7588  }
    7589 
    7590  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7591 }
    7592 
    7593 
// Removes the given free suballocation from the size-sorted registry
// m_FreeSuballocationsBySize (if it was large enough to be registered).
// Uses binary search to find the first entry of equal size, then a linear
// scan over the run of equal-sized entries to find the exact iterator.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary search positions us at the first registered item whose size
        // is not less than item->size.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Still inside the run of equal-sized entries, otherwise the item
            // is missing from the registry.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7626 
    7628 // class VmaBlockMetadata_Linear
    7629 
// Constructs empty linear metadata: both suballocation vectors empty,
// vector 0 acting as the "1st" vector, second vector unused, and all
// null-item (freed placeholder) counters zeroed.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7642 
// Nothing to release: suballocation vectors clean up via their allocators.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7646 
// Initializes metadata for a block of the given size; the whole block starts
// out free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7652 
// Exhaustive consistency check of the linear (dual-vector) metadata:
// verifies vector/mode agreement, null-item counters, per-suballocation
// offset ordering with VMA_DEBUG_MARGIN spacing, agreement between each
// suballocation and its allocation handle, and that m_SumFreeSize matches
// block size minus the sum of used bytes. Returns true on success;
// VMA_VALIDATE returns false on the first violated condition.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lower addresses,
    // so it is scanned first (forward order).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading run of the 1st vector must consist solely of null items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the top, so it
    // is scanned in reverse to keep offsets increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7779 
    7780 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7781 {
    7782  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7783  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7784 }
    7785 
// Returns the size of the largest contiguous region currently available for
// a new allocation, depending on the mode of the second vector.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First live suballocation; everything before its offset is free.
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // back() of 2nd is the lowest-offset item of the upper stack.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
    7849 
    7850 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7851 {
    7852  const VkDeviceSize size = GetSize();
    7853  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7854  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7855  const size_t suballoc1stCount = suballocations1st.size();
    7856  const size_t suballoc2ndCount = suballocations2nd.size();
    7857 
    7858  outInfo.blockCount = 1;
    7859  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7860  outInfo.unusedRangeCount = 0;
    7861  outInfo.usedBytes = 0;
    7862  outInfo.allocationSizeMin = UINT64_MAX;
    7863  outInfo.allocationSizeMax = 0;
    7864  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7865  outInfo.unusedRangeSizeMax = 0;
    7866 
    7867  VkDeviceSize lastOffset = 0;
    7868 
    7869  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7870  {
    7871  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7872  size_t nextAlloc2ndIndex = 0;
    7873  while(lastOffset < freeSpace2ndTo1stEnd)
    7874  {
    7875  // Find next non-null allocation or move nextAllocIndex to the end.
    7876  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7877  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7878  {
    7879  ++nextAlloc2ndIndex;
    7880  }
    7881 
    7882  // Found non-null allocation.
    7883  if(nextAlloc2ndIndex < suballoc2ndCount)
    7884  {
    7885  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7886 
    7887  // 1. Process free space before this allocation.
    7888  if(lastOffset < suballoc.offset)
    7889  {
    7890  // There is free space from lastOffset to suballoc.offset.
    7891  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7892  ++outInfo.unusedRangeCount;
    7893  outInfo.unusedBytes += unusedRangeSize;
    7894  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7895  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7896  }
    7897 
    7898  // 2. Process this allocation.
    7899  // There is allocation with suballoc.offset, suballoc.size.
    7900  outInfo.usedBytes += suballoc.size;
    7901  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7902  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7903 
    7904  // 3. Prepare for next iteration.
    7905  lastOffset = suballoc.offset + suballoc.size;
    7906  ++nextAlloc2ndIndex;
    7907  }
    7908  // We are at the end.
    7909  else
    7910  {
    7911  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7912  if(lastOffset < freeSpace2ndTo1stEnd)
    7913  {
    7914  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7915  ++outInfo.unusedRangeCount;
    7916  outInfo.unusedBytes += unusedRangeSize;
    7917  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7918  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7919  }
    7920 
    7921  // End of loop.
    7922  lastOffset = freeSpace2ndTo1stEnd;
    7923  }
    7924  }
    7925  }
    7926 
    7927  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7928  const VkDeviceSize freeSpace1stTo2ndEnd =
    7929  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7930  while(lastOffset < freeSpace1stTo2ndEnd)
    7931  {
    7932  // Find next non-null allocation or move nextAllocIndex to the end.
    7933  while(nextAlloc1stIndex < suballoc1stCount &&
    7934  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7935  {
    7936  ++nextAlloc1stIndex;
    7937  }
    7938 
    7939  // Found non-null allocation.
    7940  if(nextAlloc1stIndex < suballoc1stCount)
    7941  {
    7942  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7943 
    7944  // 1. Process free space before this allocation.
    7945  if(lastOffset < suballoc.offset)
    7946  {
    7947  // There is free space from lastOffset to suballoc.offset.
    7948  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7949  ++outInfo.unusedRangeCount;
    7950  outInfo.unusedBytes += unusedRangeSize;
    7951  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7952  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7953  }
    7954 
    7955  // 2. Process this allocation.
    7956  // There is allocation with suballoc.offset, suballoc.size.
    7957  outInfo.usedBytes += suballoc.size;
    7958  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7959  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7960 
    7961  // 3. Prepare for next iteration.
    7962  lastOffset = suballoc.offset + suballoc.size;
    7963  ++nextAlloc1stIndex;
    7964  }
    7965  // We are at the end.
    7966  else
    7967  {
    7968  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7969  if(lastOffset < freeSpace1stTo2ndEnd)
    7970  {
    7971  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7972  ++outInfo.unusedRangeCount;
    7973  outInfo.unusedBytes += unusedRangeSize;
    7974  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7975  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7976  }
    7977 
    7978  // End of loop.
    7979  lastOffset = freeSpace1stTo2ndEnd;
    7980  }
    7981  }
    7982 
    7983  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7984  {
    7985  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7986  while(lastOffset < size)
    7987  {
    7988  // Find next non-null allocation or move nextAllocIndex to the end.
    7989  while(nextAlloc2ndIndex != SIZE_MAX &&
    7990  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7991  {
    7992  --nextAlloc2ndIndex;
    7993  }
    7994 
    7995  // Found non-null allocation.
    7996  if(nextAlloc2ndIndex != SIZE_MAX)
    7997  {
    7998  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7999 
    8000  // 1. Process free space before this allocation.
    8001  if(lastOffset < suballoc.offset)
    8002  {
    8003  // There is free space from lastOffset to suballoc.offset.
    8004  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8005  ++outInfo.unusedRangeCount;
    8006  outInfo.unusedBytes += unusedRangeSize;
    8007  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8008  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8009  }
    8010 
    8011  // 2. Process this allocation.
    8012  // There is allocation with suballoc.offset, suballoc.size.
    8013  outInfo.usedBytes += suballoc.size;
    8014  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8015  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8016 
    8017  // 3. Prepare for next iteration.
    8018  lastOffset = suballoc.offset + suballoc.size;
    8019  --nextAlloc2ndIndex;
    8020  }
    8021  // We are at the end.
    8022  else
    8023  {
    8024  // There is free space from lastOffset to size.
    8025  if(lastOffset < size)
    8026  {
    8027  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8028  ++outInfo.unusedRangeCount;
    8029  outInfo.unusedBytes += unusedRangeSize;
    8030  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8031  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8032  }
    8033 
    8034  // End of loop.
    8035  lastOffset = size;
    8036  }
    8037  }
    8038  }
    8039 
    8040  outInfo.unusedBytes = size - outInfo.usedBytes;
    8041 }
    8042 
    8043 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8044 {
    8045  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8046  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8047  const VkDeviceSize size = GetSize();
    8048  const size_t suballoc1stCount = suballocations1st.size();
    8049  const size_t suballoc2ndCount = suballocations2nd.size();
    8050 
    8051  inoutStats.size += size;
    8052 
    8053  VkDeviceSize lastOffset = 0;
    8054 
    8055  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8056  {
    8057  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8058  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8059  while(lastOffset < freeSpace2ndTo1stEnd)
    8060  {
    8061  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8062  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8063  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8064  {
    8065  ++nextAlloc2ndIndex;
    8066  }
    8067 
    8068  // Found non-null allocation.
    8069  if(nextAlloc2ndIndex < suballoc2ndCount)
    8070  {
    8071  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8072 
    8073  // 1. Process free space before this allocation.
    8074  if(lastOffset < suballoc.offset)
    8075  {
    8076  // There is free space from lastOffset to suballoc.offset.
    8077  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8078  inoutStats.unusedSize += unusedRangeSize;
    8079  ++inoutStats.unusedRangeCount;
    8080  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8081  }
    8082 
    8083  // 2. Process this allocation.
    8084  // There is allocation with suballoc.offset, suballoc.size.
    8085  ++inoutStats.allocationCount;
    8086 
    8087  // 3. Prepare for next iteration.
    8088  lastOffset = suballoc.offset + suballoc.size;
    8089  ++nextAlloc2ndIndex;
    8090  }
    8091  // We are at the end.
    8092  else
    8093  {
    8094  if(lastOffset < freeSpace2ndTo1stEnd)
    8095  {
    8096  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8097  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8098  inoutStats.unusedSize += unusedRangeSize;
    8099  ++inoutStats.unusedRangeCount;
    8100  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8101  }
    8102 
    8103  // End of loop.
    8104  lastOffset = freeSpace2ndTo1stEnd;
    8105  }
    8106  }
    8107  }
    8108 
    8109  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8110  const VkDeviceSize freeSpace1stTo2ndEnd =
    8111  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8112  while(lastOffset < freeSpace1stTo2ndEnd)
    8113  {
    8114  // Find next non-null allocation or move nextAllocIndex to the end.
    8115  while(nextAlloc1stIndex < suballoc1stCount &&
    8116  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8117  {
    8118  ++nextAlloc1stIndex;
    8119  }
    8120 
    8121  // Found non-null allocation.
    8122  if(nextAlloc1stIndex < suballoc1stCount)
    8123  {
    8124  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8125 
    8126  // 1. Process free space before this allocation.
    8127  if(lastOffset < suballoc.offset)
    8128  {
    8129  // There is free space from lastOffset to suballoc.offset.
    8130  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8131  inoutStats.unusedSize += unusedRangeSize;
    8132  ++inoutStats.unusedRangeCount;
    8133  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8134  }
    8135 
    8136  // 2. Process this allocation.
    8137  // There is allocation with suballoc.offset, suballoc.size.
    8138  ++inoutStats.allocationCount;
    8139 
    8140  // 3. Prepare for next iteration.
    8141  lastOffset = suballoc.offset + suballoc.size;
    8142  ++nextAlloc1stIndex;
    8143  }
    8144  // We are at the end.
    8145  else
    8146  {
    8147  if(lastOffset < freeSpace1stTo2ndEnd)
    8148  {
    8149  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8150  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8151  inoutStats.unusedSize += unusedRangeSize;
    8152  ++inoutStats.unusedRangeCount;
    8153  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8154  }
    8155 
    8156  // End of loop.
    8157  lastOffset = freeSpace1stTo2ndEnd;
    8158  }
    8159  }
    8160 
    8161  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8162  {
    8163  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8164  while(lastOffset < size)
    8165  {
    8166  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8167  while(nextAlloc2ndIndex != SIZE_MAX &&
    8168  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8169  {
    8170  --nextAlloc2ndIndex;
    8171  }
    8172 
    8173  // Found non-null allocation.
    8174  if(nextAlloc2ndIndex != SIZE_MAX)
    8175  {
    8176  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8177 
    8178  // 1. Process free space before this allocation.
    8179  if(lastOffset < suballoc.offset)
    8180  {
    8181  // There is free space from lastOffset to suballoc.offset.
    8182  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8183  inoutStats.unusedSize += unusedRangeSize;
    8184  ++inoutStats.unusedRangeCount;
    8185  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8186  }
    8187 
    8188  // 2. Process this allocation.
    8189  // There is allocation with suballoc.offset, suballoc.size.
    8190  ++inoutStats.allocationCount;
    8191 
    8192  // 3. Prepare for next iteration.
    8193  lastOffset = suballoc.offset + suballoc.size;
    8194  --nextAlloc2ndIndex;
    8195  }
    8196  // We are at the end.
    8197  else
    8198  {
    8199  if(lastOffset < size)
    8200  {
    8201  // There is free space from lastOffset to size.
    8202  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8203  inoutStats.unusedSize += unusedRangeSize;
    8204  ++inoutStats.unusedRangeCount;
    8205  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8206  }
    8207 
    8208  // End of loop.
    8209  lastOffset = size;
    8210  }
    8211  }
    8212  }
    8213 }
    8214 
    8215 #if VMA_STATS_STRING_ENABLED
// Writes a detailed JSON map of this block's contents.
//
// Two passes over the same address space are required because
// PrintDetailedMap_Begin needs the totals (unused bytes, allocation count,
// unused-range count) before any entry is emitted:
//   FIRST PASS  — count allocations and unused ranges, sum used bytes.
//   SECOND PASS — emit one JSON entry per allocation / unused range.
// Both passes must visit exactly the same regions in the same order, or the
// counts written by _Begin will not match the entries emitted afterwards.
//
// Depending on m_2ndVectorMode, the address space consists of up to three
// regions (see also AddPoolStats, which performs the same traversal):
//   1. Ring-buffer mode: 2nd-vector allocations occupying
//      [0, offset of first non-null 1st-vector item).
//   2. 1st-vector allocations, up to end of block — or, in double-stack
//      mode, up to suballocations2nd.back().offset.
//   3. Double-stack mode: 2nd-vector allocations iterated backwards
//      (ascending offsets), up to `size`.
// Null (freed) items are skipped; gaps become "unused range" entries.
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS: only count; nothing is written to `json` yet.

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    // Offset up to which the address space has already been accounted for.
    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Region covered by the 1st vector. Its leading m_1stNullItemsBeginCount
    // items are known to be null, so start past them.
    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): the analogous branches in AddPoolStats and in the
            // second pass below compare against freeSpace1stTo2ndEnd, not
            // `size`. The two conditions are equivalent here — the enclosing
            // while guarantees lastOffset < freeSpace1stTo2ndEnd <= size, so
            // both are always true — but the inconsistency is worth aligning.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Upper stack: iterate the 2nd vector backwards (ascending offsets),
        // using SIZE_MAX wrap-around of the unsigned index as the stop marker.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    // Totals are now known; open the JSON object for this block.
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS: same traversal, but emit a JSON entry for every
    // allocation and every unused range counted above.
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Reuses nextAlloc1stIndex and freeSpace1stTo2ndEnd declared in the
    // first pass.
    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    // Close the JSON object for this block.
    PrintDetailedMap_End(json);
}
    8530 #endif // #if VMA_STATS_STRING_ENABLED
    8531 
    8532 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8533  uint32_t currentFrameIndex,
    8534  uint32_t frameInUseCount,
    8535  VkDeviceSize bufferImageGranularity,
    8536  VkDeviceSize allocSize,
    8537  VkDeviceSize allocAlignment,
    8538  bool upperAddress,
    8539  VmaSuballocationType allocType,
    8540  bool canMakeOtherLost,
    8541  uint32_t strategy,
    8542  VmaAllocationRequest* pAllocationRequest)
    8543 {
    8544  VMA_ASSERT(allocSize > 0);
    8545  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8546  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8547  VMA_HEAVY_ASSERT(Validate());
    8548 
    8549  const VkDeviceSize size = GetSize();
    8550  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8551  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8552 
    8553  if(upperAddress)
    8554  {
    8555  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8556  {
    8557  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8558  return false;
    8559  }
    8560 
    8561  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8562  if(allocSize > size)
    8563  {
    8564  return false;
    8565  }
    8566  VkDeviceSize resultBaseOffset = size - allocSize;
    8567  if(!suballocations2nd.empty())
    8568  {
    8569  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8570  resultBaseOffset = lastSuballoc.offset - allocSize;
    8571  if(allocSize > lastSuballoc.offset)
    8572  {
    8573  return false;
    8574  }
    8575  }
    8576 
    8577  // Start from offset equal to end of free space.
    8578  VkDeviceSize resultOffset = resultBaseOffset;
    8579 
    8580  // Apply VMA_DEBUG_MARGIN at the end.
    8581  if(VMA_DEBUG_MARGIN > 0)
    8582  {
    8583  if(resultOffset < VMA_DEBUG_MARGIN)
    8584  {
    8585  return false;
    8586  }
    8587  resultOffset -= VMA_DEBUG_MARGIN;
    8588  }
    8589 
    8590  // Apply alignment.
    8591  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8592 
    8593  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8594  // Make bigger alignment if necessary.
    8595  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8596  {
    8597  bool bufferImageGranularityConflict = false;
    8598  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8599  {
    8600  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8601  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8602  {
    8603  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8604  {
    8605  bufferImageGranularityConflict = true;
    8606  break;
    8607  }
    8608  }
    8609  else
    8610  // Already on previous page.
    8611  break;
    8612  }
    8613  if(bufferImageGranularityConflict)
    8614  {
    8615  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8616  }
    8617  }
    8618 
    8619  // There is enough free space.
    8620  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8621  suballocations1st.back().offset + suballocations1st.back().size :
    8622  0;
    8623  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8624  {
    8625  // Check previous suballocations for BufferImageGranularity conflicts.
    8626  // If conflict exists, allocation cannot be made here.
    8627  if(bufferImageGranularity > 1)
    8628  {
    8629  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8630  {
    8631  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8632  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8633  {
    8634  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8635  {
    8636  return false;
    8637  }
    8638  }
    8639  else
    8640  {
    8641  // Already on next page.
    8642  break;
    8643  }
    8644  }
    8645  }
    8646 
    8647  // All tests passed: Success.
    8648  pAllocationRequest->offset = resultOffset;
    8649  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8650  pAllocationRequest->sumItemSize = 0;
    8651  // pAllocationRequest->item unused.
    8652  pAllocationRequest->itemsToMakeLostCount = 0;
    8653  return true;
    8654  }
    8655  }
    8656  else // !upperAddress
    8657  {
    8658  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8659  {
    8660  // Try to allocate at the end of 1st vector.
    8661 
    8662  VkDeviceSize resultBaseOffset = 0;
    8663  if(!suballocations1st.empty())
    8664  {
    8665  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8666  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8667  }
    8668 
    8669  // Start from offset equal to beginning of free space.
    8670  VkDeviceSize resultOffset = resultBaseOffset;
    8671 
    8672  // Apply VMA_DEBUG_MARGIN at the beginning.
    8673  if(VMA_DEBUG_MARGIN > 0)
    8674  {
    8675  resultOffset += VMA_DEBUG_MARGIN;
    8676  }
    8677 
    8678  // Apply alignment.
    8679  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8680 
    8681  // Check previous suballocations for BufferImageGranularity conflicts.
    8682  // Make bigger alignment if necessary.
    8683  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8684  {
    8685  bool bufferImageGranularityConflict = false;
    8686  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8687  {
    8688  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8689  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8690  {
    8691  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8692  {
    8693  bufferImageGranularityConflict = true;
    8694  break;
    8695  }
    8696  }
    8697  else
    8698  // Already on previous page.
    8699  break;
    8700  }
    8701  if(bufferImageGranularityConflict)
    8702  {
    8703  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8704  }
    8705  }
    8706 
    8707  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8708  suballocations2nd.back().offset : size;
    8709 
    8710  // There is enough free space at the end after alignment.
    8711  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8712  {
    8713  // Check next suballocations for BufferImageGranularity conflicts.
    8714  // If conflict exists, allocation cannot be made here.
    8715  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8716  {
    8717  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8718  {
    8719  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8720  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8721  {
    8722  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8723  {
    8724  return false;
    8725  }
    8726  }
    8727  else
    8728  {
    8729  // Already on previous page.
    8730  break;
    8731  }
    8732  }
    8733  }
    8734 
    8735  // All tests passed: Success.
    8736  pAllocationRequest->offset = resultOffset;
    8737  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8738  pAllocationRequest->sumItemSize = 0;
    8739  // pAllocationRequest->item unused.
    8740  pAllocationRequest->itemsToMakeLostCount = 0;
    8741  return true;
    8742  }
    8743  }
    8744 
    8745  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8746  // beginning of 1st vector as the end of free space.
    8747  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8748  {
    8749  VMA_ASSERT(!suballocations1st.empty());
    8750 
    8751  VkDeviceSize resultBaseOffset = 0;
    8752  if(!suballocations2nd.empty())
    8753  {
    8754  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8755  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8756  }
    8757 
    8758  // Start from offset equal to beginning of free space.
    8759  VkDeviceSize resultOffset = resultBaseOffset;
    8760 
    8761  // Apply VMA_DEBUG_MARGIN at the beginning.
    8762  if(VMA_DEBUG_MARGIN > 0)
    8763  {
    8764  resultOffset += VMA_DEBUG_MARGIN;
    8765  }
    8766 
    8767  // Apply alignment.
    8768  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8769 
    8770  // Check previous suballocations for BufferImageGranularity conflicts.
    8771  // Make bigger alignment if necessary.
    8772  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8773  {
    8774  bool bufferImageGranularityConflict = false;
    8775  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8776  {
    8777  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8778  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8779  {
    8780  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8781  {
    8782  bufferImageGranularityConflict = true;
    8783  break;
    8784  }
    8785  }
    8786  else
    8787  // Already on previous page.
    8788  break;
    8789  }
    8790  if(bufferImageGranularityConflict)
    8791  {
    8792  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8793  }
    8794  }
    8795 
    8796  pAllocationRequest->itemsToMakeLostCount = 0;
    8797  pAllocationRequest->sumItemSize = 0;
    8798  size_t index1st = m_1stNullItemsBeginCount;
    8799 
    8800  if(canMakeOtherLost)
    8801  {
    8802  while(index1st < suballocations1st.size() &&
    8803  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8804  {
    8805  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8806  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8807  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8808  {
    8809  // No problem.
    8810  }
    8811  else
    8812  {
    8813  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8814  if(suballoc.hAllocation->CanBecomeLost() &&
    8815  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8816  {
    8817  ++pAllocationRequest->itemsToMakeLostCount;
    8818  pAllocationRequest->sumItemSize += suballoc.size;
    8819  }
    8820  else
    8821  {
    8822  return false;
    8823  }
    8824  }
    8825  ++index1st;
    8826  }
    8827 
    8828  // Check next suballocations for BufferImageGranularity conflicts.
    8829  // If conflict exists, we must mark more allocations lost or fail.
    8830  if(bufferImageGranularity > 1)
    8831  {
    8832  while(index1st < suballocations1st.size())
    8833  {
    8834  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8835  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    8836  {
    8837  if(suballoc.hAllocation != VK_NULL_HANDLE)
    8838  {
    8839  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    8840  if(suballoc.hAllocation->CanBecomeLost() &&
    8841  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8842  {
    8843  ++pAllocationRequest->itemsToMakeLostCount;
    8844  pAllocationRequest->sumItemSize += suballoc.size;
    8845  }
    8846  else
    8847  {
    8848  return false;
    8849  }
    8850  }
    8851  }
    8852  else
    8853  {
    8854  // Already on next page.
    8855  break;
    8856  }
    8857  ++index1st;
    8858  }
    8859  }
    8860  }
    8861 
    8862  // There is enough free space at the end after alignment.
    8863  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    8864  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    8865  {
    8866  // Check next suballocations for BufferImageGranularity conflicts.
    8867  // If conflict exists, allocation cannot be made here.
    8868  if(bufferImageGranularity > 1)
    8869  {
    8870  for(size_t nextSuballocIndex = index1st;
    8871  nextSuballocIndex < suballocations1st.size();
    8872  nextSuballocIndex++)
    8873  {
    8874  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    8875  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8876  {
    8877  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8878  {
    8879  return false;
    8880  }
    8881  }
    8882  else
    8883  {
    8884  // Already on next page.
    8885  break;
    8886  }
    8887  }
    8888  }
    8889 
    8890  // All tests passed: Success.
    8891  pAllocationRequest->offset = resultOffset;
    8892  pAllocationRequest->sumFreeSize =
    8893  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    8894  - resultBaseOffset
    8895  - pAllocationRequest->sumItemSize;
    8896  // pAllocationRequest->item unused.
    8897  return true;
    8898  }
    8899  }
    8900  }
    8901 
    8902  return false;
    8903 }
    8904 
/*
Makes lost the allocations previously counted by CreateAllocationRequest() in
pAllocationRequest->itemsToMakeLostCount. They are always located at the
beginning of the 1st suballocation vector (the ring-buffer wrap-around case).
Returns false if any of them could not be made lost.
*/
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Nothing was requested to be made lost - trivial success.
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Lost items are only ever counted for the ring-buffer case,
    // never while the allocator is used as a double stack.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    // Scan starts right after the leading run of already-null items.
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // Items already free are skipped - they were never counted in itemsToMakeLostCount.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a free (null) item in the middle of the 1st vector
                // and account its size as free space.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                // MakeLost() refused (e.g. the allocation was used too recently) - fail the whole request.
                return false;
            }
        }
        ++index1st;
    }

    // Normalize internal state (compaction, null-item counters, vector swap).
    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    8949 
    8950 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8951 {
    8952  uint32_t lostAllocationCount = 0;
    8953 
    8954  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8955  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8956  {
    8957  VmaSuballocation& suballoc = suballocations1st[i];
    8958  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8959  suballoc.hAllocation->CanBecomeLost() &&
    8960  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8961  {
    8962  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8963  suballoc.hAllocation = VK_NULL_HANDLE;
    8964  ++m_1stNullItemsMiddleCount;
    8965  m_SumFreeSize += suballoc.size;
    8966  ++lostAllocationCount;
    8967  }
    8968  }
    8969 
    8970  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8971  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8972  {
    8973  VmaSuballocation& suballoc = suballocations2nd[i];
    8974  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8975  suballoc.hAllocation->CanBecomeLost() &&
    8976  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8977  {
    8978  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8979  suballoc.hAllocation = VK_NULL_HANDLE;
    8980  ++m_2ndNullItemsCount;
    8981  ++lostAllocationCount;
    8982  }
    8983  }
    8984 
    8985  if(lostAllocationCount)
    8986  {
    8987  CleanupAfterFree();
    8988  }
    8989 
    8990  return lostAllocationCount;
    8991 }
    8992 
    8993 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8994 {
    8995  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8996  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8997  {
    8998  const VmaSuballocation& suballoc = suballocations1st[i];
    8999  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9000  {
    9001  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9002  {
    9003  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9004  return VK_ERROR_VALIDATION_FAILED_EXT;
    9005  }
    9006  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9007  {
    9008  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9009  return VK_ERROR_VALIDATION_FAILED_EXT;
    9010  }
    9011  }
    9012  }
    9013 
    9014  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9015  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9016  {
    9017  const VmaSuballocation& suballoc = suballocations2nd[i];
    9018  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9019  {
    9020  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9021  {
    9022  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9023  return VK_ERROR_VALIDATION_FAILED_EXT;
    9024  }
    9025  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9026  {
    9027  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9028  return VK_ERROR_VALIDATION_FAILED_EXT;
    9029  }
    9030  }
    9031  }
    9032 
    9033  return VK_SUCCESS;
    9034 }
    9035 
/*
Commits an allocation previously prepared by CreateAllocationRequest().
upperAddress == true appends to the 2nd vector and switches the allocator into
double-stack mode; otherwise the item goes to the end of the 1st vector or, in
ring-buffer fashion, to the end of the 2nd vector before the 1st vector's head.
*/
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset neither fits at the end of 1st nor before its head - the request was stale/invalid.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    // Committed allocation consumes free space regardless of where it landed.
    m_SumFreeSize -= newSuballoc.size;
}
    9105 
    9106 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9107 {
    9108  FreeAtOffset(allocation->GetOffset());
    9109 }
    9110 
/*
Frees the suballocation that starts at the given offset. Fast paths (head of
1st vector, tail of 2nd vector, tail of 1st vector) are tried first; otherwise
a binary search locates the item in the middle of either vector and marks it
as a null item. Every path ends in CleanupAfterFree() to normalize state.
*/
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // NOTE(review): relies on the invariant that a non-empty 1st vector has a
        // non-null item at index m_1stNullItemsBeginCount (CleanupAfterFree() clears
        // the vector whenever only null items remain) - verify against callers.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    // In these modes the 2nd vector is non-empty (CleanupAfterFree() resets the
    // mode to SECOND_VECTOR_EMPTY when it empties), so back() is safe.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as null item in place; compaction happens in CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The 2nd vector is sorted ascending by offset in ring-buffer mode but
        // descending in double-stack mode, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9199 
    9200 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9201 {
    9202  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9203  const size_t suballocCount = AccessSuballocations1st().size();
    9204  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9205 }
    9206 
/*
Normalizes internal state after an item was freed or made lost:
trims runs of null items, optionally compacts the 1st vector, and when the
1st vector drains completely, promotes the 2nd (ring-buffer) vector to be
the new 1st vector by flipping m_1stVectorIndex.
*/
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations at all - reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // (Middle null items adjacent to the leading run become part of it.)
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all live items of the 1st vector to the front, dropping nulls.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading nulls of the promoted vector move into the begin-count.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which internal vector plays the role of "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9303 
    9304 
    9306 // class VmaBlockMetadata_Buddy
    9307 
// Constructs empty buddy metadata. The tree root, level count, and usable
// size are set up later in Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    // NOTE(review): m_FreeCount starts at 1 although the root node is only
    // created in Init() - confirm this pairing is intentional.
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero all per-level free-list heads/tails.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9317 
// Destroys the whole node tree recursively.
// NOTE(review): DeleteNode() dereferences its argument unconditionally, so this
// assumes Init() was called (m_Root != VMA_NULL) - confirm lifecycle guarantees.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9322 
    9323 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9324 {
    9325  VmaBlockMetadata::Init(size);
    9326 
    9327  m_UsableSize = VmaPrevPow2(size);
    9328  m_SumFreeSize = m_UsableSize;
    9329 
    9330  // Calculate m_LevelCount.
    9331  m_LevelCount = 1;
    9332  while(m_LevelCount < MAX_LEVELS &&
    9333  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9334  {
    9335  ++m_LevelCount;
    9336  }
    9337 
    9338  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9339  rootNode->offset = 0;
    9340  rootNode->type = Node::TYPE_FREE;
    9341  rootNode->parent = VMA_NULL;
    9342  rootNode->buddy = VMA_NULL;
    9343 
    9344  m_Root = rootNode;
    9345  AddToFreeListFront(0, rootNode);
    9346 }
    9347 
// Validates the whole buddy tree and the per-level free lists against the
// cached counters. Returns false (via VMA_VALIDATE) on the first inconsistency.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    // NOTE(review): ctx.calculatedFreeCount is accumulated by ValidateNode but
    // never compared against m_FreeCount here - consider validating it too.

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // A non-empty list's head must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            // Tail must be the registered back pointer; interior links must be doubly consistent.
            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9390 
    9391 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9392 {
    9393  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9394  {
    9395  if(m_FreeList[level].front != VMA_NULL)
    9396  {
    9397  return LevelToNodeSize(level);
    9398  }
    9399  }
    9400  return 0;
    9401 }
    9402 
    9403 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9404 {
    9405  const VkDeviceSize unusableSize = GetUnusableSize();
    9406 
    9407  outInfo.blockCount = 1;
    9408 
    9409  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9410  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9411 
    9412  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9413  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9414  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9415 
    9416  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9417 
    9418  if(unusableSize > 0)
    9419  {
    9420  ++outInfo.unusedRangeCount;
    9421  outInfo.unusedBytes += unusableSize;
    9422  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9423  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9424  }
    9425 }
    9426 
    9427 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9428 {
    9429  const VkDeviceSize unusableSize = GetUnusableSize();
    9430 
    9431  inoutStats.size += GetSize();
    9432  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9433  inoutStats.allocationCount += m_AllocationCount;
    9434  inoutStats.unusedRangeCount += m_FreeCount;
    9435  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9436 
    9437  if(unusableSize > 0)
    9438  {
    9439  ++inoutStats.unusedRangeCount;
    9440  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9441  }
    9442 }
    9443 
    9444 #if VMA_STATS_STRING_ENABLED
    9445 
    9446 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    9447 {
    9448  // TODO optimize
    9449  VmaStatInfo stat;
    9450  CalcAllocationStatInfo(stat);
    9451 
    9452  PrintDetailedMap_Begin(
    9453  json,
    9454  stat.unusedBytes,
    9455  stat.allocationCount,
    9456  stat.unusedRangeCount);
    9457 
    9458  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    9459 
    9460  const VkDeviceSize unusableSize = GetUnusableSize();
    9461  if(unusableSize > 0)
    9462  {
    9463  PrintDetailedMap_UnusedRange(json,
    9464  m_UsableSize, // offset
    9465  unusableSize); // size
    9466  }
    9467 
    9468  PrintDetailedMap_End(json);
    9469 }
    9470 
    9471 #endif // #if VMA_STATS_STRING_ENABLED
    9472 
    9473 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    9474  uint32_t currentFrameIndex,
    9475  uint32_t frameInUseCount,
    9476  VkDeviceSize bufferImageGranularity,
    9477  VkDeviceSize allocSize,
    9478  VkDeviceSize allocAlignment,
    9479  bool upperAddress,
    9480  VmaSuballocationType allocType,
    9481  bool canMakeOtherLost,
    9482  uint32_t strategy,
    9483  VmaAllocationRequest* pAllocationRequest)
    9484 {
    9485  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    9486 
    9487  // Simple way to respect bufferImageGranularity. May be optimized some day.
    9488  // Whenever it might be an OPTIMAL image...
    9489  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    9490  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    9491  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    9492  {
    9493  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    9494  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    9495  }
    9496 
    9497  if(allocSize > m_UsableSize)
    9498  {
    9499  return false;
    9500  }
    9501 
    9502  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    9503  for(uint32_t level = targetLevel + 1; level--; )
    9504  {
    9505  for(Node* freeNode = m_FreeList[level].front;
    9506  freeNode != VMA_NULL;
    9507  freeNode = freeNode->free.next)
    9508  {
    9509  if(freeNode->offset % allocAlignment == 0)
    9510  {
    9511  pAllocationRequest->offset = freeNode->offset;
    9512  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    9513  pAllocationRequest->sumItemSize = 0;
    9514  pAllocationRequest->itemsToMakeLostCount = 0;
    9515  pAllocationRequest->customData = (void*)(uintptr_t)level;
    9516  return true;
    9517  }
    9518  }
    9519  }
    9520 
    9521  return false;
    9522 }
    9523 
    9524 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9525  uint32_t currentFrameIndex,
    9526  uint32_t frameInUseCount,
    9527  VmaAllocationRequest* pAllocationRequest)
    9528 {
    9529  /*
    9530  Lost allocations are not supported in buddy allocator at the moment.
    9531  Support might be added in the future.
    9532  */
    9533  return pAllocationRequest->itemsToMakeLostCount == 0;
    9534 }
    9535 
    9536 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9537 {
    9538  /*
    9539  Lost allocations are not supported in buddy allocator at the moment.
    9540  Support might be added in the future.
    9541  */
    9542  return 0;
    9543 }
    9544 
/*
Commits an allocation prepared by CreateAllocationRequest(): locates the free
node chosen there (its level travels through request.customData), splits it
repeatedly until reaching the target level, and converts the final node into
an allocation node. The `type` and `upperAddress` parameters are not used by
the buddy algorithm.
*/
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level of the free node picked by CreateAllocationRequest().
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the node with the requested offset in that level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Left is pushed last so it ends up at the front and is picked next.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fulfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9619 
    9620 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9621 {
    9622  if(node->type == Node::TYPE_SPLIT)
    9623  {
    9624  DeleteNode(node->split.leftChild->buddy);
    9625  DeleteNode(node->split.leftChild);
    9626  }
    9627 
    9628  vma_delete(GetAllocationCallbacks(), node);
    9629 }
    9630 
// Recursively validates one node of the buddy tree and accumulates
// free/allocation statistics into ctx so the caller can compare them
// against the cached counters. Returns false (via VMA_VALIDATE) on the
// first inconsistency found.
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Exactly the root node (parent == null) has no buddy; buddy links must be symmetric.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // The tail of the node not covered by the allocation still counts as free space.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            // Left child covers the first half of this node's range.
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            // Right child starts exactly at the halfway point.
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        return false;
    }

    return true;
}
    9674 
    9675 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9676 {
    9677  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9678  uint32_t level = 0;
    9679  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9680  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9681  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9682  {
    9683  ++level;
    9684  currLevelNodeSize = nextLevelNodeSize;
    9685  nextLevelNodeSize = currLevelNodeSize >> 1;
    9686  }
    9687  return level;
    9688 }
    9689 
// Frees the allocation that starts at the given offset.
// Walks the tree from the root, descending into whichever half contains
// `offset`, until the leaf allocation node is found; then merges freed
// buddies bottom-up as long as both halves of a parent are free.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Offset lies in the first half - descend into the left child.
            node = node->split.leftChild;
        }
        else
        {
            // Offset lies in the second half - descend into the right child.
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    // NOTE(review): the assert below tolerates alloc == VK_NULL_HANDLE, yet
    // alloc->GetSize() is dereferenced unconditionally a few lines down -
    // confirm callers never actually pass null here.
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        // Two free children collapsed into one free parent node.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9740 
    9741 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9742 {
    9743  switch(node->type)
    9744  {
    9745  case Node::TYPE_FREE:
    9746  ++outInfo.unusedRangeCount;
    9747  outInfo.unusedBytes += levelNodeSize;
    9748  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9749  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9750  break;
    9751  case Node::TYPE_ALLOCATION:
    9752  {
    9753  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9754  ++outInfo.allocationCount;
    9755  outInfo.usedBytes += allocSize;
    9756  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9757  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9758 
    9759  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9760  if(unusedRangeSize > 0)
    9761  {
    9762  ++outInfo.unusedRangeCount;
    9763  outInfo.unusedBytes += unusedRangeSize;
    9764  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9765  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9766  }
    9767  }
    9768  break;
    9769  case Node::TYPE_SPLIT:
    9770  {
    9771  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9772  const Node* const leftChild = node->split.leftChild;
    9773  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9774  const Node* const rightChild = leftChild->buddy;
    9775  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9776  }
    9777  break;
    9778  default:
    9779  VMA_ASSERT(0);
    9780  }
    9781 }
    9782 
    9783 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9784 {
    9785  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9786 
    9787  // List is empty.
    9788  Node* const frontNode = m_FreeList[level].front;
    9789  if(frontNode == VMA_NULL)
    9790  {
    9791  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9792  node->free.prev = node->free.next = VMA_NULL;
    9793  m_FreeList[level].front = m_FreeList[level].back = node;
    9794  }
    9795  else
    9796  {
    9797  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9798  node->free.prev = VMA_NULL;
    9799  node->free.next = frontNode;
    9800  frontNode->free.prev = node;
    9801  m_FreeList[level].front = node;
    9802  }
    9803 }
    9804 
    9805 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9806 {
    9807  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9808 
    9809  // It is at the front.
    9810  if(node->free.prev == VMA_NULL)
    9811  {
    9812  VMA_ASSERT(m_FreeList[level].front == node);
    9813  m_FreeList[level].front = node->free.next;
    9814  }
    9815  else
    9816  {
    9817  Node* const prevFreeNode = node->free.prev;
    9818  VMA_ASSERT(prevFreeNode->free.next == node);
    9819  prevFreeNode->free.next = node->free.next;
    9820  }
    9821 
    9822  // It is at the back.
    9823  if(node->free.next == VMA_NULL)
    9824  {
    9825  VMA_ASSERT(m_FreeList[level].back == node);
    9826  m_FreeList[level].back = node->free.prev;
    9827  }
    9828  else
    9829  {
    9830  Node* const nextFreeNode = node->free.next;
    9831  VMA_ASSERT(nextFreeNode->free.prev == node);
    9832  nextFreeNode->free.prev = node->free.prev;
    9833  }
    9834 }
    9835 
    9836 #if VMA_STATS_STRING_ENABLED
    9837 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    9838 {
    9839  switch(node->type)
    9840  {
    9841  case Node::TYPE_FREE:
    9842  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    9843  break;
    9844  case Node::TYPE_ALLOCATION:
    9845  {
    9846  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    9847  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9848  if(allocSize < levelNodeSize)
    9849  {
    9850  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    9851  }
    9852  }
    9853  break;
    9854  case Node::TYPE_SPLIT:
    9855  {
    9856  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9857  const Node* const leftChild = node->split.leftChild;
    9858  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    9859  const Node* const rightChild = leftChild->buddy;
    9860  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    9861  }
    9862  break;
    9863  default:
    9864  VMA_ASSERT(0);
    9865  }
    9866 }
    9867 #endif // #if VMA_STATS_STRING_ENABLED
    9868 
    9869 
    9871 // class VmaDeviceMemoryBlock
    9872 
// Constructs an empty, uninitialized block. Real initialization happens in
// Init(); until then m_hMemory stays VK_NULL_HANDLE. The hAllocator
// parameter is accepted but not used here.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    9882 
    9883 void VmaDeviceMemoryBlock::Init(
    9884  VmaAllocator hAllocator,
    9885  uint32_t newMemoryTypeIndex,
    9886  VkDeviceMemory newMemory,
    9887  VkDeviceSize newSize,
    9888  uint32_t id,
    9889  uint32_t algorithm)
    9890 {
    9891  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9892 
    9893  m_MemoryTypeIndex = newMemoryTypeIndex;
    9894  m_Id = id;
    9895  m_hMemory = newMemory;
    9896 
    9897  switch(algorithm)
    9898  {
    9900  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9901  break;
    9903  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9904  break;
    9905  default:
    9906  VMA_ASSERT(0);
    9907  // Fall-through.
    9908  case 0:
    9909  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9910  }
    9911  m_pMetadata->Init(newSize);
    9912 }
    9913 
// Releases the underlying VkDeviceMemory and the metadata object.
// The block must be empty - any remaining suballocations mean the user
// leaked VmaAllocation objects.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    9927 
// Sanity-checks the block: it must own device memory of nonzero size, and
// its metadata must be internally consistent.
bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}
    9935 
    9936 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9937 {
    9938  void* pData = nullptr;
    9939  VkResult res = Map(hAllocator, 1, &pData);
    9940  if(res != VK_SUCCESS)
    9941  {
    9942  return res;
    9943  }
    9944 
    9945  res = m_pMetadata->CheckCorruption(pData);
    9946 
    9947  Unmap(hAllocator, 1);
    9948 
    9949  return res;
    9950 }
    9951 
// Maps the whole block's VkDeviceMemory and returns the pointer in *ppData
// (which may be null if the caller only wants the side effect). Mapping is
// reference-counted: `count` references are added, and vkMapMemory is
// actually issued only on the 0 -> nonzero transition.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Serializes vkMapMemory against concurrent map/unmap/bind on this VkDeviceMemory.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped - just bump the reference count and reuse the pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            // Count is set only on success so a failed map leaves the block unmapped.
            m_MapCount = count;
        }
        return result;
    }
}
    9990 
    9991 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    9992 {
    9993  if(count == 0)
    9994  {
    9995  return;
    9996  }
    9997 
    9998  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    9999  if(m_MapCount >= count)
    10000  {
    10001  m_MapCount -= count;
    10002  if(m_MapCount == 0)
    10003  {
    10004  m_pMappedData = VMA_NULL;
    10005  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10006  }
    10007  }
    10008  else
    10009  {
    10010  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10011  }
    10012 }
    10013 
    10014 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10015 {
    10016  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10017  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10018 
    10019  void* pData;
    10020  VkResult res = Map(hAllocator, 1, &pData);
    10021  if(res != VK_SUCCESS)
    10022  {
    10023  return res;
    10024  }
    10025 
    10026  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10027  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10028 
    10029  Unmap(hAllocator, 1);
    10030 
    10031  return VK_SUCCESS;
    10032 }
    10033 
    10034 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10035 {
    10036  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10037  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10038 
    10039  void* pData;
    10040  VkResult res = Map(hAllocator, 1, &pData);
    10041  if(res != VK_SUCCESS)
    10042  {
    10043  return res;
    10044  }
    10045 
    10046  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10047  {
    10048  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10049  }
    10050  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10051  {
    10052  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10053  }
    10054 
    10055  Unmap(hAllocator, 1);
    10056 
    10057  return VK_SUCCESS;
    10058 }
    10059 
// Binds hBuffer to this block's VkDeviceMemory at the allocation's offset.
// The allocation must be a block-type allocation living in this block.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}
    10075 
// Binds hImage to this block's VkDeviceMemory at the allocation's offset.
// Mirrors BindBufferMemory for image resources.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkImage hImage)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindImageMemory(
        hAllocator->m_hDevice,
        hImage,
        m_hMemory,
        hAllocation->GetOffset());
}
    10091 
// Resets outInfo to a neutral state for statistics accumulation: all
// counters zeroed, minima seeded with UINT64_MAX so the first real sample
// always replaces them (maxima start at 0 from the memset).
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
    10098 
    10099 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10100 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10101 {
    10102  inoutInfo.blockCount += srcInfo.blockCount;
    10103  inoutInfo.allocationCount += srcInfo.allocationCount;
    10104  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10105  inoutInfo.usedBytes += srcInfo.usedBytes;
    10106  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10107  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10108  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10109  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10110  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10111 }
    10112 
    10113 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10114 {
    10115  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10116  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10117  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10118  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10119 }
    10120 
// Builds the pool's block vector from the user-provided create info.
// createInfo.blockSize == 0 means "let the allocator choose": the
// preferredBlockSize is used and the size is flagged as non-explicit.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10139 
// Nothing to do explicitly: m_BlockVector's destructor releases the pool's
// memory blocks.
VmaPool_T::~VmaPool_T()
{
}
    10143 
    10144 #if VMA_STATS_STRING_ENABLED
    10145 
    10146 #endif // #if VMA_STATS_STRING_ENABLED
    10147 
// Stores the configuration of a vector of memory blocks of one memory type.
// No blocks are created here - see CreateMinBlocks() / Allocate().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10175 
    10176 VmaBlockVector::~VmaBlockVector()
    10177 {
    10178  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10179 
    10180  for(size_t i = m_Blocks.size(); i--; )
    10181  {
    10182  m_Blocks[i]->Destroy(m_hAllocator);
    10183  vma_delete(m_hAllocator, m_Blocks[i]);
    10184  }
    10185 }
    10186 
    10187 VkResult VmaBlockVector::CreateMinBlocks()
    10188 {
    10189  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10190  {
    10191  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10192  if(res != VK_SUCCESS)
    10193  {
    10194  return res;
    10195  }
    10196  }
    10197  return VK_SUCCESS;
    10198 }
    10199 
    10200 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10201 {
    10202  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10203 
    10204  const size_t blockCount = m_Blocks.size();
    10205 
    10206  pStats->size = 0;
    10207  pStats->unusedSize = 0;
    10208  pStats->allocationCount = 0;
    10209  pStats->unusedRangeCount = 0;
    10210  pStats->unusedRangeSizeMax = 0;
    10211  pStats->blockCount = blockCount;
    10212 
    10213  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10214  {
    10215  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10216  VMA_ASSERT(pBlock);
    10217  VMA_HEAVY_ASSERT(pBlock->Validate());
    10218  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10219  }
    10220 }
    10221 
    10222 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10223 {
    10224  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10225  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10226  (VMA_DEBUG_MARGIN > 0) &&
    10227  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10228 }
    10229 
// Upper bound on retries in VmaBlockVector::Allocate's canMakeOtherLost
// path: each attempt may be invalidated when other allocations are touched
// concurrently ("Next try" in the loop below).
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10231 
    10232 VkResult VmaBlockVector::Allocate(
    10233  VmaPool hCurrentPool,
    10234  uint32_t currentFrameIndex,
    10235  VkDeviceSize size,
    10236  VkDeviceSize alignment,
    10237  const VmaAllocationCreateInfo& createInfo,
    10238  VmaSuballocationType suballocType,
    10239  VmaAllocation* pAllocation)
    10240 {
    10241  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10242  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10243  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10244  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10245  const bool canCreateNewBlock =
    10246  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10247  (m_Blocks.size() < m_MaxBlockCount);
    10248  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10249 
    10250  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10251  // Which in turn is available only when maxBlockCount = 1.
    10252  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10253  {
    10254  canMakeOtherLost = false;
    10255  }
    10256 
    10257  // Upper address can only be used with linear allocator and within single memory block.
    10258  if(isUpperAddress &&
    10259  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10260  {
    10261  return VK_ERROR_FEATURE_NOT_PRESENT;
    10262  }
    10263 
    10264  // Validate strategy.
    10265  switch(strategy)
    10266  {
    10267  case 0:
    10269  break;
    10273  break;
    10274  default:
    10275  return VK_ERROR_FEATURE_NOT_PRESENT;
    10276  }
    10277 
    10278  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10279  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10280  {
    10281  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10282  }
    10283 
    10284  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10285 
    10286  /*
    10287  Under certain condition, this whole section can be skipped for optimization, so
    10288  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10289  e.g. for custom pools with linear algorithm.
    10290  */
    10291  if(!canMakeOtherLost || canCreateNewBlock)
    10292  {
    10293  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10294  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10296 
    10297  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10298  {
    10299  // Use only last block.
    10300  if(!m_Blocks.empty())
    10301  {
    10302  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10303  VMA_ASSERT(pCurrBlock);
    10304  VkResult res = AllocateFromBlock(
    10305  pCurrBlock,
    10306  hCurrentPool,
    10307  currentFrameIndex,
    10308  size,
    10309  alignment,
    10310  allocFlagsCopy,
    10311  createInfo.pUserData,
    10312  suballocType,
    10313  strategy,
    10314  pAllocation);
    10315  if(res == VK_SUCCESS)
    10316  {
    10317  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10318  return VK_SUCCESS;
    10319  }
    10320  }
    10321  }
    10322  else
    10323  {
    10325  {
    10326  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10327  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10328  {
    10329  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10330  VMA_ASSERT(pCurrBlock);
    10331  VkResult res = AllocateFromBlock(
    10332  pCurrBlock,
    10333  hCurrentPool,
    10334  currentFrameIndex,
    10335  size,
    10336  alignment,
    10337  allocFlagsCopy,
    10338  createInfo.pUserData,
    10339  suballocType,
    10340  strategy,
    10341  pAllocation);
    10342  if(res == VK_SUCCESS)
    10343  {
    10344  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10345  return VK_SUCCESS;
    10346  }
    10347  }
    10348  }
    10349  else // WORST_FIT, FIRST_FIT
    10350  {
    10351  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10352  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10353  {
    10354  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10355  VMA_ASSERT(pCurrBlock);
    10356  VkResult res = AllocateFromBlock(
    10357  pCurrBlock,
    10358  hCurrentPool,
    10359  currentFrameIndex,
    10360  size,
    10361  alignment,
    10362  allocFlagsCopy,
    10363  createInfo.pUserData,
    10364  suballocType,
    10365  strategy,
    10366  pAllocation);
    10367  if(res == VK_SUCCESS)
    10368  {
    10369  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10370  return VK_SUCCESS;
    10371  }
    10372  }
    10373  }
    10374  }
    10375 
    10376  // 2. Try to create new block.
    10377  if(canCreateNewBlock)
    10378  {
    10379  // Calculate optimal size for new block.
    10380  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10381  uint32_t newBlockSizeShift = 0;
    10382  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10383 
    10384  if(!m_ExplicitBlockSize)
    10385  {
    10386  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10387  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10388  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10389  {
    10390  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10391  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10392  {
    10393  newBlockSize = smallerNewBlockSize;
    10394  ++newBlockSizeShift;
    10395  }
    10396  else
    10397  {
    10398  break;
    10399  }
    10400  }
    10401  }
    10402 
    10403  size_t newBlockIndex = 0;
    10404  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10405  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10406  if(!m_ExplicitBlockSize)
    10407  {
    10408  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10409  {
    10410  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10411  if(smallerNewBlockSize >= size)
    10412  {
    10413  newBlockSize = smallerNewBlockSize;
    10414  ++newBlockSizeShift;
    10415  res = CreateBlock(newBlockSize, &newBlockIndex);
    10416  }
    10417  else
    10418  {
    10419  break;
    10420  }
    10421  }
    10422  }
    10423 
    10424  if(res == VK_SUCCESS)
    10425  {
    10426  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10427  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10428 
    10429  res = AllocateFromBlock(
    10430  pBlock,
    10431  hCurrentPool,
    10432  currentFrameIndex,
    10433  size,
    10434  alignment,
    10435  allocFlagsCopy,
    10436  createInfo.pUserData,
    10437  suballocType,
    10438  strategy,
    10439  pAllocation);
    10440  if(res == VK_SUCCESS)
    10441  {
    10442  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10443  return VK_SUCCESS;
    10444  }
    10445  else
    10446  {
    10447  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10448  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10449  }
    10450  }
    10451  }
    10452  }
    10453 
    10454  // 3. Try to allocate from existing blocks with making other allocations lost.
    10455  if(canMakeOtherLost)
    10456  {
    10457  uint32_t tryIndex = 0;
    10458  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10459  {
    10460  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10461  VmaAllocationRequest bestRequest = {};
    10462  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10463 
    10464  // 1. Search existing allocations.
    10466  {
    10467  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10468  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10469  {
    10470  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10471  VMA_ASSERT(pCurrBlock);
    10472  VmaAllocationRequest currRequest = {};
    10473  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10474  currentFrameIndex,
    10475  m_FrameInUseCount,
    10476  m_BufferImageGranularity,
    10477  size,
    10478  alignment,
    10479  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10480  suballocType,
    10481  canMakeOtherLost,
    10482  strategy,
    10483  &currRequest))
    10484  {
    10485  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10486  if(pBestRequestBlock == VMA_NULL ||
    10487  currRequestCost < bestRequestCost)
    10488  {
    10489  pBestRequestBlock = pCurrBlock;
    10490  bestRequest = currRequest;
    10491  bestRequestCost = currRequestCost;
    10492 
    10493  if(bestRequestCost == 0)
    10494  {
    10495  break;
    10496  }
    10497  }
    10498  }
    10499  }
    10500  }
    10501  else // WORST_FIT, FIRST_FIT
    10502  {
    10503  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10504  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10505  {
    10506  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10507  VMA_ASSERT(pCurrBlock);
    10508  VmaAllocationRequest currRequest = {};
    10509  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10510  currentFrameIndex,
    10511  m_FrameInUseCount,
    10512  m_BufferImageGranularity,
    10513  size,
    10514  alignment,
    10515  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10516  suballocType,
    10517  canMakeOtherLost,
    10518  strategy,
    10519  &currRequest))
    10520  {
    10521  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10522  if(pBestRequestBlock == VMA_NULL ||
    10523  currRequestCost < bestRequestCost ||
    10525  {
    10526  pBestRequestBlock = pCurrBlock;
    10527  bestRequest = currRequest;
    10528  bestRequestCost = currRequestCost;
    10529 
    10530  if(bestRequestCost == 0 ||
    10532  {
    10533  break;
    10534  }
    10535  }
    10536  }
    10537  }
    10538  }
    10539 
    10540  if(pBestRequestBlock != VMA_NULL)
    10541  {
    10542  if(mapped)
    10543  {
    10544  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10545  if(res != VK_SUCCESS)
    10546  {
    10547  return res;
    10548  }
    10549  }
    10550 
    10551  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10552  currentFrameIndex,
    10553  m_FrameInUseCount,
    10554  &bestRequest))
    10555  {
    10556  // We no longer have an empty Allocation.
    10557  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10558  {
    10559  m_HasEmptyBlock = false;
    10560  }
    10561  // Allocate from this pBlock.
    10562  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10563  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10564  (*pAllocation)->InitBlockAllocation(
    10565  hCurrentPool,
    10566  pBestRequestBlock,
    10567  bestRequest.offset,
    10568  alignment,
    10569  size,
    10570  suballocType,
    10571  mapped,
    10572  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10573  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10574  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10575  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10576  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10577  {
    10578  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10579  }
    10580  if(IsCorruptionDetectionEnabled())
    10581  {
    10582  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10583  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10584  }
    10585  return VK_SUCCESS;
    10586  }
    10587  // else: Some allocations must have been touched while we are here. Next try.
    10588  }
    10589  else
    10590  {
    10591  // Could not find place in any of the blocks - break outer loop.
    10592  break;
    10593  }
    10594  }
    10595  /* Maximum number of tries exceeded - a very unlike event when many other
    10596  threads are simultaneously touching allocations making it impossible to make
    10597  lost at the same time as we try to allocate. */
    10598  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10599  {
    10600  return VK_ERROR_TOO_MANY_OBJECTS;
    10601  }
    10602  }
    10603 
    10604  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10605 }
    10606 
// Returns the given allocation's region to its block and updates the
// empty-block bookkeeping. If a block becomes (or is found) empty while the
// vector already tracks one empty block beyond m_MinBlockCount, that block is
// destroyed - but only after the mutex is released, to keep the critical
// section short.
void VmaBlockVector::Free(
    VmaAllocation hAllocation)
{
    // Block scheduled for destruction; deleted outside the lock below.
    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;

    // Scope for lock.
    {
        VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();

        // With corruption detection on, verify the magic values written around
        // this allocation before freeing it.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
        }

        // A persistently mapped allocation holds one map reference on the block.
        if(hAllocation->IsPersistentMap())
        {
            pBlock->Unmap(m_hAllocator, 1);
        }

        pBlock->m_pMetadata->Free(hAllocation);
        VMA_HEAVY_ASSERT(pBlock->Validate());

        // NOTE(review): 'memTypeIndex' is not declared in this function; this
        // only compiles because VMA_DEBUG_LOG typically expands to nothing.
        VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);

        // pBlock became empty after this deallocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Already has empty Allocation. We don't want to have two, so delete this one.
            if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pBlock;
                Remove(pBlock);
            }
            // We now have first empty block.
            else
            {
                m_HasEmptyBlock = true;
            }
        }
        // pBlock didn't become empty, but we have another empty block - find and free that one.
        // (This is optional, heuristics.)
        else if(m_HasEmptyBlock)
        {
            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
            if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
            {
                pBlockToDelete = pLastBlock;
                m_Blocks.pop_back();
                m_HasEmptyBlock = false;
            }
        }

        IncrementallySortBlocks();
    }

    // Destruction of a free Allocation. Deferred until this point, outside of mutex
    // lock, for performance reason.
    if(pBlockToDelete != VMA_NULL)
    {
        VMA_DEBUG_LOG(" Deleted empty allocation");
        pBlockToDelete->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, pBlockToDelete);
    }
}
    10674 
    10675 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10676 {
    10677  VkDeviceSize result = 0;
    10678  for(size_t i = m_Blocks.size(); i--; )
    10679  {
    10680  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10681  if(result >= m_PreferredBlockSize)
    10682  {
    10683  break;
    10684  }
    10685  }
    10686  return result;
    10687 }
    10688 
    10689 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10690 {
    10691  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10692  {
    10693  if(m_Blocks[blockIndex] == pBlock)
    10694  {
    10695  VmaVectorRemove(m_Blocks, blockIndex);
    10696  return;
    10697  }
    10698  }
    10699  VMA_ASSERT(0);
    10700 }
    10701 
    10702 void VmaBlockVector::IncrementallySortBlocks()
    10703 {
    10704  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10705  {
    10706  // Bubble sort only until first swap.
    10707  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10708  {
    10709  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10710  {
    10711  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10712  return;
    10713  }
    10714  }
    10715  }
    10716 }
    10717 
// Tries to suballocate 'size' bytes with 'alignment' from one specific,
// already existing block. On success fills *pAllocation and returns
// VK_SUCCESS; otherwise returns VK_ERROR_OUT_OF_DEVICE_MEMORY without
// touching *pAllocation.
// NOTE(review): mutates m_HasEmptyBlock without taking m_Mutex - presumably
// the caller holds the block-vector mutex; confirm at call sites.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // This fast path never makes other allocations lost.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    // Ask the block's metadata whether a suitable free region exists.
    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // For MAPPED_BIT, add a map reference up front so a mapping failure
        // aborts before any metadata is modified.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Create the allocation object and commit the region in the metadata.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optionally fill the fresh memory with a debug pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // With corruption detection, surround the allocation with magic values.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10792 
    10793 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10794 {
    10795  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10796  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10797  allocInfo.allocationSize = blockSize;
    10798  VkDeviceMemory mem = VK_NULL_HANDLE;
    10799  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10800  if(res < 0)
    10801  {
    10802  return res;
    10803  }
    10804 
    10805  // New VkDeviceMemory successfully created.
    10806 
    10807  // Create new Allocation for it.
    10808  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10809  pBlock->Init(
    10810  m_hAllocator,
    10811  m_MemoryTypeIndex,
    10812  mem,
    10813  allocInfo.allocationSize,
    10814  m_NextBlockId++,
    10815  m_Algorithm);
    10816 
    10817  m_Blocks.push_back(pBlock);
    10818  if(pNewBlockIndex != VMA_NULL)
    10819  {
    10820  *pNewBlockIndex = m_Blocks.size() - 1;
    10821  }
    10822 
    10823  return VK_SUCCESS;
    10824 }
    10825 
    10826 #if VMA_STATS_STRING_ENABLED
    10827 
// Dumps this block vector's state as a JSON object. Custom pools report their
// full configuration (memory type, block size, block-count limits,
// frameInUseCount, algorithm); the default per-memory-type vectors report only
// the preferred block size. Each block's detailed map follows, keyed by its
// numeric id. Holds m_Mutex for the duration of the dump.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are written only when they actually constrain the pool.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // 0 means the default algorithm, which is left implicit in the output.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10890 
    10891 #endif // #if VMA_STATS_STRING_ENABLED
    10892 
    10893 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10894  VmaAllocator hAllocator,
    10895  uint32_t currentFrameIndex)
    10896 {
    10897  if(m_pDefragmentator == VMA_NULL)
    10898  {
    10899  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10900  hAllocator,
    10901  this,
    10902  currentFrameIndex);
    10903  }
    10904 
    10905  return m_pDefragmentator;
    10906 }
    10907 
// Runs defragmentation for this block vector within the given budgets.
// maxBytesToMove and maxAllocationsToMove are in-out parameters, decremented
// by what was actually consumed. Afterwards destroys blocks that became empty
// (down to m_MinBlockCount) and recomputes m_HasEmptyBlock. No-op when
// EnsureDefragmentator() was never called.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must never exceed its budget.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    // Iterating backwards keeps indices valid across VmaVectorRemove.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Kept empty to honor m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    10964 
    10965 void VmaBlockVector::DestroyDefragmentator()
    10966 {
    10967  if(m_pDefragmentator != VMA_NULL)
    10968  {
    10969  vma_delete(m_hAllocator, m_pDefragmentator);
    10970  m_pDefragmentator = VMA_NULL;
    10971  }
    10972 }
    10973 
    10974 void VmaBlockVector::MakePoolAllocationsLost(
    10975  uint32_t currentFrameIndex,
    10976  size_t* pLostAllocationCount)
    10977 {
    10978  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10979  size_t lostAllocationCount = 0;
    10980  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10981  {
    10982  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10983  VMA_ASSERT(pBlock);
    10984  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10985  }
    10986  if(pLostAllocationCount != VMA_NULL)
    10987  {
    10988  *pLostAllocationCount = lostAllocationCount;
    10989  }
    10990 }
    10991 
    10992 VkResult VmaBlockVector::CheckCorruption()
    10993 {
    10994  if(!IsCorruptionDetectionEnabled())
    10995  {
    10996  return VK_ERROR_FEATURE_NOT_PRESENT;
    10997  }
    10998 
    10999  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11000  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11001  {
    11002  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11003  VMA_ASSERT(pBlock);
    11004  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11005  if(res != VK_SUCCESS)
    11006  {
    11007  return res;
    11008  }
    11009  }
    11010  return VK_SUCCESS;
    11011 }
    11012 
    11013 void VmaBlockVector::AddStats(VmaStats* pStats)
    11014 {
    11015  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11016  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11017 
    11018  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11019 
    11020  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11021  {
    11022  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11023  VMA_ASSERT(pBlock);
    11024  VMA_HEAVY_ASSERT(pBlock->Validate());
    11025  VmaStatInfo allocationStatInfo;
    11026  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11027  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11028  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11029  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11030  }
    11031 }
    11032 
    11034 // VmaDefragmentator members definition
    11035 
// Constructs a defragmentator bound to a single block vector. Move counters
// start at zero; allocations and per-block info are registered later via
// AddAllocation() and Defragment(). Only the default algorithm (0) is
// supported - hence the assert.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    // Containers use the allocator's user-provided allocation callbacks.
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11050 
    11051 VmaDefragmentator::~VmaDefragmentator()
    11052 {
    11053  for(size_t i = m_Blocks.size(); i--; )
    11054  {
    11055  vma_delete(m_hAllocator, m_Blocks[i]);
    11056  }
    11057 }
    11058 
    11059 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11060 {
    11061  AllocationInfo allocInfo;
    11062  allocInfo.m_hAllocation = hAlloc;
    11063  allocInfo.m_pChanged = pChanged;
    11064  m_Allocations.push_back(allocInfo);
    11065 }
    11066 
    11067 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11068 {
    11069  // It has already been mapped for defragmentation.
    11070  if(m_pMappedDataForDefragmentation)
    11071  {
    11072  *ppMappedData = m_pMappedDataForDefragmentation;
    11073  return VK_SUCCESS;
    11074  }
    11075 
    11076  // It is originally mapped.
    11077  if(m_pBlock->GetMappedData())
    11078  {
    11079  *ppMappedData = m_pBlock->GetMappedData();
    11080  return VK_SUCCESS;
    11081  }
    11082 
    11083  // Map on first usage.
    11084  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11085  *ppMappedData = m_pMappedDataForDefragmentation;
    11086  return res;
    11087 }
    11088 
    11089 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11090 {
    11091  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11092  {
    11093  m_pBlock->Unmap(hAllocator, 1);
    11094  }
    11095 }
    11096 
// Performs one round of defragmentation: repeatedly takes allocations from
// the most "source" blocks (end of m_Blocks) and tries to repack them into
// earlier ("destination") blocks. Returns VK_SUCCESS when every allocation
// was considered, VK_INCOMPLETE when a move budget was hit, or a mapping
// error from EnsureMapping().
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // Cursor over (block, allocation). srcAllocIndex == SIZE_MAX means "not
    // positioned yet"; it is clamped to the block's last allocation below.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            // NOTE(review): the 'strategy' argument appears to be missing from
            // this listing of the call below - verify against upstream source.
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both blocks must be host-mapped to copy the data over.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Re-create debug margins around the new location.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit the move: allocate at destination, free at source,
                // and repoint the allocation object to its new block/offset.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                // Notify the caller that this allocation moved.
                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance the cursor to the previous allocation, or previous block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11240 
// Entry point of the defragmentation algorithm for one block vector. Builds
// per-block bookkeeping, distributes the registered allocations to their
// blocks, sorts the blocks from most "destination" to most "source", then
// runs up to two DefragmentRound() passes within the given budgets. Blocks
// mapped for the copy are unmapped before returning.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            // Binary search over the pointer-sorted m_Blocks.
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to a known block.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block ordering criteria.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11308 
    11309 bool VmaDefragmentator::MoveMakesSense(
    11310  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11311  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11312 {
    11313  if(dstBlockIndex < srcBlockIndex)
    11314  {
    11315  return true;
    11316  }
    11317  if(dstBlockIndex > srcBlockIndex)
    11318  {
    11319  return false;
    11320  }
    11321  if(dstOffset < srcOffset)
    11322  {
    11323  return true;
    11324  }
    11325  return false;
    11326 }
    11327 
    11329 // VmaRecorder
    11330 
    11331 #if VMA_RECORDING_ENABLED
    11332 
// Constructs an inactive recorder. m_File stays null until Init() succeeds;
// the performance-counter fields hold sentinel values until then.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11341 
// Opens the recording output file and writes the CSV header.
// settings.pFilePath - path of the output file, opened in binary write mode.
// useMutex - whether file access is serialized across threads.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Windows high-resolution clock; recorded timestamps are relative to
    // m_StartCounter, scaled by m_Freq.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: magic line identifying the file, then format version.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,3");

    return VK_SUCCESS;
}
    11363 
    11364 VmaRecorder::~VmaRecorder()
    11365 {
    11366  if(m_File != VMA_NULL)
    11367  {
    11368  fclose(m_File);
    11369  }
    11370 }
    11371 
    11372 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11373 {
    11374  CallParams callParams;
    11375  GetBasicParams(callParams);
    11376 
    11377  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11378  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11379  Flush();
    11380 }
    11381 
    11382 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11383 {
    11384  CallParams callParams;
    11385  GetBasicParams(callParams);
    11386 
    11387  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11388  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11389  Flush();
    11390 }
    11391 
// Records a vmaCreatePool call as one CSV line: pool creation parameters
// followed by the resulting pool handle.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        // size_t members widened to uint64_t to match %llu on all platforms.
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11408 
    11409 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11410 {
    11411  CallParams callParams;
    11412  GetBasicParams(callParams);
    11413 
    11414  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11415  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11416  pool);
    11417  Flush();
    11418 }
    11419 
// Records a vmaAllocateMemory call as one CSV line: memory requirements,
// allocation-create parameters and the resulting allocation handle.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // userDataStr renders pUserData either as a string or as a pointer,
    // depending on VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11444 
// Appends a CSV line describing a vmaAllocateMemoryForBuffer() call.
// Same columns as RecordAllocateMemory, plus the two dedicated-allocation
// hints (requires/prefers) encoded as 0/1 right after the memory requirements.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams); // thread id + timestamp of this call

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11473 
// Appends a CSV line describing a vmaAllocateMemoryForImage() call.
// Identical column layout to RecordAllocateMemoryForBuffer, only the function
// name column differs.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams); // thread id + timestamp of this call

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11502 
    11503 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11504  VmaAllocation allocation)
    11505 {
    11506  CallParams callParams;
    11507  GetBasicParams(callParams);
    11508 
    11509  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11510  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11511  allocation);
    11512  Flush();
    11513 }
    11514 
    11515 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11516  VmaAllocation allocation,
    11517  const void* pUserData)
    11518 {
    11519  CallParams callParams;
    11520  GetBasicParams(callParams);
    11521 
    11522  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11523  UserDataString userDataStr(
    11524  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11525  pUserData);
    11526  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11527  allocation,
    11528  userDataStr.GetString());
    11529  Flush();
    11530 }
    11531 
    11532 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11533  VmaAllocation allocation)
    11534 {
    11535  CallParams callParams;
    11536  GetBasicParams(callParams);
    11537 
    11538  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11539  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11540  allocation);
    11541  Flush();
    11542 }
    11543 
    11544 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11545  VmaAllocation allocation)
    11546 {
    11547  CallParams callParams;
    11548  GetBasicParams(callParams);
    11549 
    11550  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11551  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11552  allocation);
    11553  Flush();
    11554 }
    11555 
    11556 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11557  VmaAllocation allocation)
    11558 {
    11559  CallParams callParams;
    11560  GetBasicParams(callParams);
    11561 
    11562  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11563  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11564  allocation);
    11565  Flush();
    11566 }
    11567 
    11568 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11569  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11570 {
    11571  CallParams callParams;
    11572  GetBasicParams(callParams);
    11573 
    11574  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11575  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11576  allocation,
    11577  offset,
    11578  size);
    11579  Flush();
    11580 }
    11581 
    11582 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11583  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11584 {
    11585  CallParams callParams;
    11586  GetBasicParams(callParams);
    11587 
    11588  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11589  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11590  allocation,
    11591  offset,
    11592  size);
    11593  Flush();
    11594 }
    11595 
// Appends a CSV line describing a vmaCreateBuffer() call.
// Columns: threadId,time,frameIndex,functionName, then the VkBufferCreateInfo
// fields relevant for replay (flags, size, usage, sharingMode), all
// VmaAllocationCreateInfo fields, the pool and allocation handles, and the
// user-data string.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams); // thread id + timestamp of this call

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11621 
// Appends a CSV line describing a vmaCreateImage() call.
// Columns: threadId,time,frameIndex,functionName, then the VkImageCreateInfo
// fields needed to re-create the image on replay (flags, type, format, extent,
// mip/array counts, samples, tiling, usage, sharing, initial layout), all
// VmaAllocationCreateInfo fields, the pool and allocation handles, and the
// user-data string.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams); // thread id + timestamp of this call

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11656 
    11657 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11658  VmaAllocation allocation)
    11659 {
    11660  CallParams callParams;
    11661  GetBasicParams(callParams);
    11662 
    11663  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11664  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11665  allocation);
    11666  Flush();
    11667 }
    11668 
    11669 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11670  VmaAllocation allocation)
    11671 {
    11672  CallParams callParams;
    11673  GetBasicParams(callParams);
    11674 
    11675  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11676  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11677  allocation);
    11678  Flush();
    11679 }
    11680 
    11681 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11682  VmaAllocation allocation)
    11683 {
    11684  CallParams callParams;
    11685  GetBasicParams(callParams);
    11686 
    11687  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11688  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11689  allocation);
    11690  Flush();
    11691 }
    11692 
    11693 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11694  VmaAllocation allocation)
    11695 {
    11696  CallParams callParams;
    11697  GetBasicParams(callParams);
    11698 
    11699  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11700  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11701  allocation);
    11702  Flush();
    11703 }
    11704 
    11705 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11706  VmaPool pool)
    11707 {
    11708  CallParams callParams;
    11709  GetBasicParams(callParams);
    11710 
    11711  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11712  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11713  pool);
    11714  Flush();
    11715 }
    11716 
    11717 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11718 {
    11719  if(pUserData != VMA_NULL)
    11720  {
    11721  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11722  {
    11723  m_Str = (const char*)pUserData;
    11724  }
    11725  else
    11726  {
    11727  sprintf_s(m_PtrStr, "%p", pUserData);
    11728  m_Str = m_PtrStr;
    11729  }
    11730  }
    11731  else
    11732  {
    11733  m_Str = "";
    11734  }
    11735 }
    11736 
// Writes the "Config,Begin" ... "Config,End" header section of the recording:
// physical device identity, the device limits that affect allocation layout,
// the full memory heap/type topology, enabled extensions, and the VMA debug
// macros the library was compiled with. A replay tool reads this to reproduce
// allocations faithfully (or detect an incompatible environment).
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that influence suballocation placement.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heap/type topology.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11782 
    11783 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11784 {
    11785  outParams.threadId = GetCurrentThreadId();
    11786 
    11787  LARGE_INTEGER counter;
    11788  QueryPerformanceCounter(&counter);
    11789  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11790 }
    11791 
    11792 void VmaRecorder::Flush()
    11793 {
    11794  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11795  {
    11796  fflush(m_File);
    11797  }
    11798 }
    11799 
    11800 #endif // #if VMA_RECORDING_ENABLED
    11801 
    11803 // VmaAllocator_T
    11804 
    11805 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    11806  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    11807  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    11808  m_hDevice(pCreateInfo->device),
    11809  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    11810  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    11811  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    11812  m_PreferredLargeHeapBlockSize(0),
    11813  m_PhysicalDevice(pCreateInfo->physicalDevice),
    11814  m_CurrentFrameIndex(0),
    11815  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    11816  m_NextPoolId(0)
    11818  ,m_pRecorder(VMA_NULL)
    11819 #endif
    11820 {
    11821  if(VMA_DEBUG_DETECT_CORRUPTION)
    11822  {
    11823  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    11824  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    11825  }
    11826 
    11827  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    11828 
    11829 #if !(VMA_DEDICATED_ALLOCATION)
    11831  {
    11832  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    11833  }
    11834 #endif
    11835 
    11836  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    11837  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    11838  memset(&m_MemProps, 0, sizeof(m_MemProps));
    11839 
    11840  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    11841  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    11842 
    11843  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    11844  {
    11845  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    11846  }
    11847 
    11848  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    11849  {
    11850  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    11851  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    11852  }
    11853 
    11854  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    11855 
    11856  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    11857  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    11858 
    11859  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    11860  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    11861  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    11862  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    11863 
    11864  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    11865  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11866 
    11867  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    11868  {
    11869  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    11870  {
    11871  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    11872  if(limit != VK_WHOLE_SIZE)
    11873  {
    11874  m_HeapSizeLimit[heapIndex] = limit;
    11875  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    11876  {
    11877  m_MemProps.memoryHeaps[heapIndex].size = limit;
    11878  }
    11879  }
    11880  }
    11881  }
    11882 
    11883  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    11884  {
    11885  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    11886 
    11887  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    11888  this,
    11889  memTypeIndex,
    11890  preferredBlockSize,
    11891  0,
    11892  SIZE_MAX,
    11893  GetBufferImageGranularity(),
    11894  pCreateInfo->frameInUseCount,
    11895  false, // isCustomPool
    11896  false, // explicitBlockSize
    11897  false); // linearAlgorithm
    11898  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    11899  // becase minBlockCount is 0.
    11900  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    11901 
    11902  }
    11903 }
    11904 
    11905 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    11906 {
    11907  VkResult res = VK_SUCCESS;
    11908 
    11909  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    11910  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    11911  {
    11912 #if VMA_RECORDING_ENABLED
    11913  m_pRecorder = vma_new(this, VmaRecorder)();
    11914  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    11915  if(res != VK_SUCCESS)
    11916  {
    11917  return res;
    11918  }
    11919  m_pRecorder->WriteConfiguration(
    11920  m_PhysicalDeviceProperties,
    11921  m_MemProps,
    11922  m_UseKhrDedicatedAllocation);
    11923  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    11924 #else
    11925  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    11926  return VK_ERROR_FEATURE_NOT_PRESENT;
    11927 #endif
    11928  }
    11929 
    11930  return res;
    11931 }
    11932 
    11933 VmaAllocator_T::~VmaAllocator_T()
    11934 {
    11935 #if VMA_RECORDING_ENABLED
    11936  if(m_pRecorder != VMA_NULL)
    11937  {
    11938  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    11939  vma_delete(this, m_pRecorder);
    11940  }
    11941 #endif
    11942 
    11943  VMA_ASSERT(m_Pools.empty());
    11944 
    11945  for(size_t i = GetMemoryTypeCount(); i--; )
    11946  {
    11947  vma_delete(this, m_pDedicatedAllocations[i]);
    11948  vma_delete(this, m_pBlockVectors[i]);
    11949  }
    11950 }
    11951 
// Populates m_VulkanFunctions, in two layers:
// 1. When VMA_STATIC_VULKAN_FUNCTIONS == 1, start from the statically linked
//    Vulkan entry points (the two KHR dedicated-allocation functions are
//    fetched via vkGetDeviceProcAddr, as they are extension functions).
// 2. Then overwrite any entry for which the user supplied a non-null pointer
//    in pVulkanFunctions (pVulkanFunctions may itself be null).
// Finally asserts that every required function pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension functions are not statically exported - resolve them at runtime.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-provided function pointer, keeping the existing one when null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12037 
    12038 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12039 {
    12040  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12041  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12042  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12043  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12044 }
    12045 
    12046 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12047  VkDeviceSize size,
    12048  VkDeviceSize alignment,
    12049  bool dedicatedAllocation,
    12050  VkBuffer dedicatedBuffer,
    12051  VkImage dedicatedImage,
    12052  const VmaAllocationCreateInfo& createInfo,
    12053  uint32_t memTypeIndex,
    12054  VmaSuballocationType suballocType,
    12055  VmaAllocation* pAllocation)
    12056 {
    12057  VMA_ASSERT(pAllocation != VMA_NULL);
    12058  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12059 
    12060  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12061 
    12062  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12063  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12064  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12065  {
    12066  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12067  }
    12068 
    12069  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12070  VMA_ASSERT(blockVector);
    12071 
    12072  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12073  bool preferDedicatedMemory =
    12074  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12075  dedicatedAllocation ||
    12076  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12077  size > preferredBlockSize / 2;
    12078 
    12079  if(preferDedicatedMemory &&
    12080  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12081  finalCreateInfo.pool == VK_NULL_HANDLE)
    12082  {
    12084  }
    12085 
    12086  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12087  {
    12088  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12089  {
    12090  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12091  }
    12092  else
    12093  {
    12094  return AllocateDedicatedMemory(
    12095  size,
    12096  suballocType,
    12097  memTypeIndex,
    12098  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12099  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12100  finalCreateInfo.pUserData,
    12101  dedicatedBuffer,
    12102  dedicatedImage,
    12103  pAllocation);
    12104  }
    12105  }
    12106  else
    12107  {
    12108  VkResult res = blockVector->Allocate(
    12109  VK_NULL_HANDLE, // hCurrentPool
    12110  m_CurrentFrameIndex.load(),
    12111  size,
    12112  alignment,
    12113  finalCreateInfo,
    12114  suballocType,
    12115  pAllocation);
    12116  if(res == VK_SUCCESS)
    12117  {
    12118  return res;
    12119  }
    12120 
    12121  // 5. Try dedicated memory.
    12122  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12123  {
    12124  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12125  }
    12126  else
    12127  {
    12128  res = AllocateDedicatedMemory(
    12129  size,
    12130  suballocType,
    12131  memTypeIndex,
    12132  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12133  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12134  finalCreateInfo.pUserData,
    12135  dedicatedBuffer,
    12136  dedicatedImage,
    12137  pAllocation);
    12138  if(res == VK_SUCCESS)
    12139  {
    12140  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12141  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12142  return VK_SUCCESS;
    12143  }
    12144  else
    12145  {
    12146  // Everything failed: Return error code.
    12147  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12148  return res;
    12149  }
    12150  }
    12151  }
    12152 }
    12153 
// Allocates a dedicated (own) VkDeviceMemory object for a single resource,
// bypassing the block-vector suballocation machinery.
//
// Parameters:
// - size, memTypeIndex: size and Vulkan memory type of the allocation.
// - map: if true, the memory is persistently mapped before being returned.
// - isUserDataString, pUserData: forwarded to the new VmaAllocation_T.
// - dedicatedBuffer / dedicatedImage: optional handle chained via
//   VkMemoryDedicatedAllocateInfoKHR when the KHR dedicated-allocation
//   extension is enabled; at most one may be non-null.
//
// On success registers the allocation in m_pDedicatedAllocations[memTypeIndex]
// and returns VK_SUCCESS; on failure propagates the error from
// vkAllocateMemory / vkMapMemory, freeing the memory if mapping failed.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain VkMemoryDedicatedAllocateInfoKHR so the driver knows which single
    // buffer or image this memory is dedicated to.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            // Buffer and image are mutually exclusive here.
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
        return res;
    }

    // Optionally map the whole memory object persistently.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the just-allocated memory before returning.
            VMA_DEBUG_LOG(" vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations (kept sorted for fast removal).
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12236 
    12237 void VmaAllocator_T::GetBufferMemoryRequirements(
    12238  VkBuffer hBuffer,
    12239  VkMemoryRequirements& memReq,
    12240  bool& requiresDedicatedAllocation,
    12241  bool& prefersDedicatedAllocation) const
    12242 {
    12243 #if VMA_DEDICATED_ALLOCATION
    12244  if(m_UseKhrDedicatedAllocation)
    12245  {
    12246  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12247  memReqInfo.buffer = hBuffer;
    12248 
    12249  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12250 
    12251  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12252  memReq2.pNext = &memDedicatedReq;
    12253 
    12254  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12255 
    12256  memReq = memReq2.memoryRequirements;
    12257  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12258  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12259  }
    12260  else
    12261 #endif // #if VMA_DEDICATED_ALLOCATION
    12262  {
    12263  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12264  requiresDedicatedAllocation = false;
    12265  prefersDedicatedAllocation = false;
    12266  }
    12267 }
    12268 
    12269 void VmaAllocator_T::GetImageMemoryRequirements(
    12270  VkImage hImage,
    12271  VkMemoryRequirements& memReq,
    12272  bool& requiresDedicatedAllocation,
    12273  bool& prefersDedicatedAllocation) const
    12274 {
    12275 #if VMA_DEDICATED_ALLOCATION
    12276  if(m_UseKhrDedicatedAllocation)
    12277  {
    12278  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12279  memReqInfo.image = hImage;
    12280 
    12281  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12282 
    12283  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12284  memReq2.pNext = &memDedicatedReq;
    12285 
    12286  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12287 
    12288  memReq = memReq2.memoryRequirements;
    12289  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12290  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12291  }
    12292  else
    12293 #endif // #if VMA_DEDICATED_ALLOCATION
    12294  {
    12295  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12296  requiresDedicatedAllocation = false;
    12297  prefersDedicatedAllocation = false;
    12298  }
    12299 }
    12300 
    12301 VkResult VmaAllocator_T::AllocateMemory(
    12302  const VkMemoryRequirements& vkMemReq,
    12303  bool requiresDedicatedAllocation,
    12304  bool prefersDedicatedAllocation,
    12305  VkBuffer dedicatedBuffer,
    12306  VkImage dedicatedImage,
    12307  const VmaAllocationCreateInfo& createInfo,
    12308  VmaSuballocationType suballocType,
    12309  VmaAllocation* pAllocation)
    12310 {
    12311  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12312 
    12313  if(vkMemReq.size == 0)
    12314  {
    12315  return VK_ERROR_VALIDATION_FAILED_EXT;
    12316  }
    12317  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12318  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12319  {
    12320  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12321  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12322  }
    12323  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12325  {
    12326  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12327  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12328  }
    12329  if(requiresDedicatedAllocation)
    12330  {
    12331  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12332  {
    12333  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12334  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12335  }
    12336  if(createInfo.pool != VK_NULL_HANDLE)
    12337  {
    12338  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12339  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12340  }
    12341  }
    12342  if((createInfo.pool != VK_NULL_HANDLE) &&
    12343  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12344  {
    12345  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12346  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12347  }
    12348 
    12349  if(createInfo.pool != VK_NULL_HANDLE)
    12350  {
    12351  const VkDeviceSize alignmentForPool = VMA_MAX(
    12352  vkMemReq.alignment,
    12353  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12354  return createInfo.pool->m_BlockVector.Allocate(
    12355  createInfo.pool,
    12356  m_CurrentFrameIndex.load(),
    12357  vkMemReq.size,
    12358  alignmentForPool,
    12359  createInfo,
    12360  suballocType,
    12361  pAllocation);
    12362  }
    12363  else
    12364  {
    12365  // Bit mask of memory Vulkan types acceptable for this allocation.
    12366  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12367  uint32_t memTypeIndex = UINT32_MAX;
    12368  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12369  if(res == VK_SUCCESS)
    12370  {
    12371  VkDeviceSize alignmentForMemType = VMA_MAX(
    12372  vkMemReq.alignment,
    12373  GetMemoryTypeMinAlignment(memTypeIndex));
    12374 
    12375  res = AllocateMemoryOfType(
    12376  vkMemReq.size,
    12377  alignmentForMemType,
    12378  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12379  dedicatedBuffer,
    12380  dedicatedImage,
    12381  createInfo,
    12382  memTypeIndex,
    12383  suballocType,
    12384  pAllocation);
    12385  // Succeeded on first try.
    12386  if(res == VK_SUCCESS)
    12387  {
    12388  return res;
    12389  }
    12390  // Allocation from this memory type failed. Try other compatible memory types.
    12391  else
    12392  {
    12393  for(;;)
    12394  {
    12395  // Remove old memTypeIndex from list of possibilities.
    12396  memoryTypeBits &= ~(1u << memTypeIndex);
    12397  // Find alternative memTypeIndex.
    12398  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12399  if(res == VK_SUCCESS)
    12400  {
    12401  alignmentForMemType = VMA_MAX(
    12402  vkMemReq.alignment,
    12403  GetMemoryTypeMinAlignment(memTypeIndex));
    12404 
    12405  res = AllocateMemoryOfType(
    12406  vkMemReq.size,
    12407  alignmentForMemType,
    12408  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12409  dedicatedBuffer,
    12410  dedicatedImage,
    12411  createInfo,
    12412  memTypeIndex,
    12413  suballocType,
    12414  pAllocation);
    12415  // Allocation from this alternative memory type succeeded.
    12416  if(res == VK_SUCCESS)
    12417  {
    12418  return res;
    12419  }
    12420  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12421  }
    12422  // No other matching memory type index could be found.
    12423  else
    12424  {
    12425  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12426  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12427  }
    12428  }
    12429  }
    12430  }
    12431  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12432  else
    12433  return res;
    12434  }
    12435 }
    12436 
    12437 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12438 {
    12439  VMA_ASSERT(allocation);
    12440 
    12441  if(TouchAllocation(allocation))
    12442  {
    12443  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12444  {
    12445  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12446  }
    12447 
    12448  switch(allocation->GetType())
    12449  {
    12450  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12451  {
    12452  VmaBlockVector* pBlockVector = VMA_NULL;
    12453  VmaPool hPool = allocation->GetPool();
    12454  if(hPool != VK_NULL_HANDLE)
    12455  {
    12456  pBlockVector = &hPool->m_BlockVector;
    12457  }
    12458  else
    12459  {
    12460  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12461  pBlockVector = m_pBlockVectors[memTypeIndex];
    12462  }
    12463  pBlockVector->Free(allocation);
    12464  }
    12465  break;
    12466  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12467  FreeDedicatedMemory(allocation);
    12468  break;
    12469  default:
    12470  VMA_ASSERT(0);
    12471  }
    12472  }
    12473 
    12474  allocation->SetUserData(this, VMA_NULL);
    12475  vma_delete(this, allocation);
    12476 }
    12477 
    12478 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12479 {
    12480  // Initialize.
    12481  InitStatInfo(pStats->total);
    12482  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12483  InitStatInfo(pStats->memoryType[i]);
    12484  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12485  InitStatInfo(pStats->memoryHeap[i]);
    12486 
    12487  // Process default pools.
    12488  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12489  {
    12490  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12491  VMA_ASSERT(pBlockVector);
    12492  pBlockVector->AddStats(pStats);
    12493  }
    12494 
    12495  // Process custom pools.
    12496  {
    12497  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12498  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12499  {
    12500  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12501  }
    12502  }
    12503 
    12504  // Process dedicated allocations.
    12505  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12506  {
    12507  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12508  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12509  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12510  VMA_ASSERT(pDedicatedAllocVector);
    12511  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12512  {
    12513  VmaStatInfo allocationStatInfo;
    12514  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12515  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12516  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12517  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12518  }
    12519  }
    12520 
    12521  // Postprocess.
    12522  VmaPostprocessCalcStatInfo(pStats->total);
    12523  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12524  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12525  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12526  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12527 }
    12528 
// PCI vendor ID assigned to AMD (4098 == 0x1002). Presumably compared against
// VkPhysicalDeviceProperties::vendorID elsewhere — not referenced in this chunk.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12530 
// Defragments the given allocations in three phases:
// 1. Dispatch each eligible allocation to a per-block-vector defragmentator.
// 2. Run defragmentation on default and custom block vectors, respecting the
//    optional byte/allocation-count budget from pDefragmentationInfo.
// 3. Destroy all defragmentators.
//
// Only block (non-dedicated), non-lost allocations in HOST_VISIBLE |
// HOST_COHERENT memory are eligible; pools with linear or buddy algorithm
// are skipped. pAllocationsChanged (optional) receives per-allocation flags
// telling whether that allocation was moved.
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    // Clear output arrays/structs up front so partial failures still leave
    // them in a defined state.
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // Held for the whole operation: custom pools must not appear/disappear
    // while defragmentation runs.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // Defaults mean "unlimited" when no VmaDefragmentationInfo is provided.
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory. Stops at the first error (result != VK_SUCCESS).
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.
    // Done unconditionally (even after an error) in reverse order of creation.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12651 
// Fills pAllocationInfo with the current state of hAllocation.
// For allocations that can become lost, this also atomically "touches" the
// allocation (bumps its last-use frame index via CAS) so that reading info
// counts as use; a lost allocation is reported with null memory and offset 0.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // CAS loop: retries until the last-use frame index is either observed
        // as LOST or successfully advanced to the current frame.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report size and user data only.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report live state.
                // Note: pMappedData is VMA_NULL here — lost-capable allocations
                // cannot be persistently mapped.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
                // On CAS failure localLastUseFrameIndex was refreshed with the
                // current value; loop re-evaluates.
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats builds also track last-use frame for non-lost allocations.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12723 
// "Touches" an allocation: marks it as used in the current frame and returns
// whether it is still alive (false if it has become lost). Uses the same CAS
// loop as GetAllocationInfo to advance the last-use frame index atomically.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation has been lost; it cannot be revived.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already marked as used this frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
                // On CAS failure the refreshed value is re-examined next iteration.
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats builds track last-use frame even for non-lost-capable allocations.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Non-lost-capable allocations are always alive.
        return true;
    }
}
    12775 
    12776 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    12777 {
    12778  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    12779 
    12780  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    12781 
    12782  if(newCreateInfo.maxBlockCount == 0)
    12783  {
    12784  newCreateInfo.maxBlockCount = SIZE_MAX;
    12785  }
    12786  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    12787  {
    12788  return VK_ERROR_INITIALIZATION_FAILED;
    12789  }
    12790 
    12791  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    12792 
    12793  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    12794 
    12795  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    12796  if(res != VK_SUCCESS)
    12797  {
    12798  vma_delete(this, *pPool);
    12799  *pPool = VMA_NULL;
    12800  return res;
    12801  }
    12802 
    12803  // Add to m_Pools.
    12804  {
    12805  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12806  (*pPool)->SetId(m_NextPoolId++);
    12807  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    12808  }
    12809 
    12810  return VK_SUCCESS;
    12811 }
    12812 
    12813 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12814 {
    12815  // Remove from m_Pools.
    12816  {
    12817  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12818  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    12819  VMA_ASSERT(success && "Pool not found in Allocator.");
    12820  }
    12821 
    12822  vma_delete(this, pool);
    12823 }
    12824 
// Retrieves statistics of a single custom pool by forwarding to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    12829 
// Atomically publishes the new frame index, which the lost-allocation logic
// (TouchAllocation / GetAllocationInfo) compares against last-use indices.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    12834 
// Marks allocations in the given pool as lost (per the pool's frame-in-use
// policy); optionally reports how many were lost via pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    12843 
// Validates margin fill patterns in a single custom pool; forwards the
// block vector's result (e.g. VK_ERROR_FEATURE_NOT_PRESENT when corruption
// detection is not enabled for its memory type).
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    12848 
    12849 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12850 {
    12851  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12852 
    12853  // Process default pools.
    12854  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12855  {
    12856  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12857  {
    12858  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12859  VMA_ASSERT(pBlockVector);
    12860  VkResult localRes = pBlockVector->CheckCorruption();
    12861  switch(localRes)
    12862  {
    12863  case VK_ERROR_FEATURE_NOT_PRESENT:
    12864  break;
    12865  case VK_SUCCESS:
    12866  finalRes = VK_SUCCESS;
    12867  break;
    12868  default:
    12869  return localRes;
    12870  }
    12871  }
    12872  }
    12873 
    12874  // Process custom pools.
    12875  {
    12876  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12877  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12878  {
    12879  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12880  {
    12881  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12882  switch(localRes)
    12883  {
    12884  case VK_ERROR_FEATURE_NOT_PRESENT:
    12885  break;
    12886  case VK_SUCCESS:
    12887  finalRes = VK_SUCCESS;
    12888  break;
    12889  default:
    12890  return localRes;
    12891  }
    12892  }
    12893  }
    12894  }
    12895 
    12896  return finalRes;
    12897 }
    12898 
// Creates an allocation object that is already in the "lost" state — it has
// no backing memory and its frame index is VMA_FRAME_INDEX_LOST.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    12904 
    12905 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    12906 {
    12907  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    12908 
    12909  VkResult res;
    12910  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12911  {
    12912  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12913  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    12914  {
    12915  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12916  if(res == VK_SUCCESS)
    12917  {
    12918  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    12919  }
    12920  }
    12921  else
    12922  {
    12923  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12924  }
    12925  }
    12926  else
    12927  {
    12928  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12929  }
    12930 
    12931  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    12932  {
    12933  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    12934  }
    12935 
    12936  return res;
    12937 }
    12938 
// Wraps vkFreeMemory: invokes the user free-callback (while the memory is
// still valid), frees the memory, then returns its size to the per-heap
// budget if a heap size limit is active.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Give the freed bytes back to the heap budget, if this heap is limited.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    12955 
    12956 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    12957 {
    12958  if(hAllocation->CanBecomeLost())
    12959  {
    12960  return VK_ERROR_MEMORY_MAP_FAILED;
    12961  }
    12962 
    12963  switch(hAllocation->GetType())
    12964  {
    12965  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12966  {
    12967  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12968  char *pBytes = VMA_NULL;
    12969  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    12970  if(res == VK_SUCCESS)
    12971  {
    12972  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    12973  hAllocation->BlockAllocMap();
    12974  }
    12975  return res;
    12976  }
    12977  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12978  return hAllocation->DedicatedAllocMap(this, ppData);
    12979  default:
    12980  VMA_ASSERT(0);
    12981  return VK_ERROR_MEMORY_MAP_FAILED;
    12982  }
    12983 }
    12984 
    12985 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    12986 {
    12987  switch(hAllocation->GetType())
    12988  {
    12989  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12990  {
    12991  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12992  hAllocation->BlockAllocUnmap();
    12993  pBlock->Unmap(this, 1);
    12994  }
    12995  break;
    12996  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12997  hAllocation->DedicatedAllocUnmap(this);
    12998  break;
    12999  default:
    13000  VMA_ASSERT(0);
    13001  }
    13002 }
    13003 
    13004 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13005 {
    13006  VkResult res = VK_SUCCESS;
    13007  switch(hAllocation->GetType())
    13008  {
    13009  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13010  res = GetVulkanFunctions().vkBindBufferMemory(
    13011  m_hDevice,
    13012  hBuffer,
    13013  hAllocation->GetMemory(),
    13014  0); //memoryOffset
    13015  break;
    13016  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13017  {
    13018  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13019  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13020  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13021  break;
    13022  }
    13023  default:
    13024  VMA_ASSERT(0);
    13025  }
    13026  return res;
    13027 }
    13028 
    13029 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13030 {
    13031  VkResult res = VK_SUCCESS;
    13032  switch(hAllocation->GetType())
    13033  {
    13034  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13035  res = GetVulkanFunctions().vkBindImageMemory(
    13036  m_hDevice,
    13037  hImage,
    13038  hAllocation->GetMemory(),
    13039  0); //memoryOffset
    13040  break;
    13041  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13042  {
    13043  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13044  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13045  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13046  break;
    13047  }
    13048  default:
    13049  VMA_ASSERT(0);
    13050  }
    13051  return res;
    13052 }
    13053 
/*
Flushes or invalidates host caches for the range [offset, offset + size)
of hAllocation, expressed relative to the allocation. size may be
VK_WHOLE_SIZE. A no-op for coherent memory types and for size == 0.

The range is expanded to nonCoherentAtomSize granularity, as required by
vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges, and clamped so
it never extends past the allocation (dedicated) or the whole block (block
allocation).
*/
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    // Explicit flush/invalidate is only needed for HOST_VISIBLE but
    // non-HOST_COHERENT memory types.
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align the start down to the atom boundary.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Align the end up, but never past the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            // Translate from allocation-relative to block-relative offset and
            // clamp the size so the range stays inside the block.
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0); // Unknown allocation type.
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0); // Unknown cache operation.
        }
    }
    // else: Just ignore this call.
}
    13129 
    13130 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13131 {
    13132  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13133 
    13134  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13135  {
    13136  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13137  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13138  VMA_ASSERT(pDedicatedAllocations);
    13139  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13140  VMA_ASSERT(success);
    13141  }
    13142 
    13143  VkDeviceMemory hMemory = allocation->GetMemory();
    13144 
    13145  /*
    13146  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13147  before vkFreeMemory.
    13148 
    13149  if(allocation->GetMappedData() != VMA_NULL)
    13150  {
    13151  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13152  }
    13153  */
    13154 
    13155  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13156 
    13157  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13158 }
    13159 
    13160 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13161 {
    13162  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13163  !hAllocation->CanBecomeLost() &&
    13164  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13165  {
    13166  void* pData = VMA_NULL;
    13167  VkResult res = Map(hAllocation, &pData);
    13168  if(res == VK_SUCCESS)
    13169  {
    13170  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13171  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13172  Unmap(hAllocation);
    13173  }
    13174  else
    13175  {
    13176  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13177  }
    13178  }
    13179 }
    13180 
    13181 #if VMA_STATS_STRING_ENABLED
    13182 
/*
Writes a detailed JSON map of all memory owned by this allocator into json:
three optional top-level sections — "DedicatedAllocations" (per memory
type), "DefaultPools" (per-memory-type block vectors), and "Pools" (custom
pools, keyed by pool id). Each section header is emitted lazily, only once
the first non-empty entry is found.
*/
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        // Lock the per-memory-type dedicated allocation list while reading it.
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" object on first non-empty list.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default pools: one block vector per memory type.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                // Open the "DefaultPools" object on first non-empty vector.
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Pools are keyed by their numeric id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13268 
    13269 #endif // #if VMA_STATS_STRING_ENABLED
    13270 
    13272 // Public interface
    13273 
    13274 VkResult vmaCreateAllocator(
    13275  const VmaAllocatorCreateInfo* pCreateInfo,
    13276  VmaAllocator* pAllocator)
    13277 {
    13278  VMA_ASSERT(pCreateInfo && pAllocator);
    13279  VMA_DEBUG_LOG("vmaCreateAllocator");
    13280  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13281  return (*pAllocator)->Init(pCreateInfo);
    13282 }
    13283 
    13284 void vmaDestroyAllocator(
    13285  VmaAllocator allocator)
    13286 {
    13287  if(allocator != VK_NULL_HANDLE)
    13288  {
    13289  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13290  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13291  vma_delete(&allocationCallbacks, allocator);
    13292  }
    13293 }
    13294 
// NOTE(review): the signature line (return type + function name) is missing
// from this excerpt; from the body this appears to be
// vmaGetPhysicalDeviceProperties — confirm against the full file.
// Returns a pointer to the cached VkPhysicalDeviceProperties of the device
// the allocator was created for.
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
}
    13302 
// NOTE(review): the signature line is missing from this excerpt; from the
// body this appears to be vmaGetMemoryProperties — confirm against the
// full file. Returns a pointer to the allocator's cached
// VkPhysicalDeviceMemoryProperties.
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
}
    13310 
// NOTE(review): the signature line is missing from this excerpt; from the
// body this appears to be vmaGetMemoryTypeProperties — confirm against the
// full file. Writes the VkMemoryPropertyFlags of the given memory type
// into *pFlags; memoryTypeIndex must be a valid index.
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags)
{
    VMA_ASSERT(allocator && pFlags);
    VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
}
    13320 
// NOTE(review): the signature line is missing from this excerpt; from the
// body this appears to be vmaSetCurrentFrameIndex — confirm against the
// full file. Sets the allocator's current frame index; the reserved value
// VMA_FRAME_INDEX_LOST is rejected.
    VmaAllocator allocator,
    uint32_t frameIndex)
{
    VMA_ASSERT(allocator);
    VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->SetCurrentFrameIndex(frameIndex);
}
    13332 
    13333 void vmaCalculateStats(
    13334  VmaAllocator allocator,
    13335  VmaStats* pStats)
    13336 {
    13337  VMA_ASSERT(allocator && pStats);
    13338  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13339  allocator->CalculateStats(pStats);
    13340 }
    13341 
    13342 #if VMA_STATS_STRING_ENABLED
    13343 
/*
Builds a JSON statistics string for the allocator and returns it in
*ppStatsString as a NUL-terminated buffer owned by the allocator's
allocation callbacks. Free it with vmaFreeStatsString. Contains overall
totals, per-heap and per-memory-type stats and flags, and — when
detailedMap is VK_TRUE — the full detailed map from PrintDetailedMap.
*/
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the JSON writer flushes into sb before the string is copied.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Heap-level stats only if anything was allocated from this heap.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest every memory type belonging to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a caller-owned, NUL-terminated buffer.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13451 
    13452 void vmaFreeStatsString(
    13453  VmaAllocator allocator,
    13454  char* pStatsString)
    13455 {
    13456  if(pStatsString != VMA_NULL)
    13457  {
    13458  VMA_ASSERT(allocator);
    13459  size_t len = strlen(pStatsString);
    13460  vma_delete_array(allocator, pStatsString, len + 1);
    13461  }
    13462 }
    13463 
    13464 #endif // #if VMA_STATS_STRING_ENABLED
    13465 
/*
This function is not protected by any mutex because it just reads immutable data.
*/
// Finds the best memory type index for the given memoryTypeBits mask and
// VmaAllocationCreateInfo: usage is translated into required/preferred
// flags, then the acceptable type with the fewest missing preferred flags
// wins. Returns VK_ERROR_FEATURE_NOT_PRESENT if no type qualifies.
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    // Caller may further restrict the acceptable types.
    if(pAllocationCreateInfo->memoryTypeBits != 0)
    {
        memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    }

    uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;

    // A persistently mapped allocation needs host-visible memory.
    const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    if(mapped)
    {
        preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }

    // Convert usage to requiredFlags and preferredFlags.
    // NOTE(review): the `case VMA_MEMORY_USAGE_*:` labels of this switch are
    // missing from this excerpt (lost in extraction); each fragment ending
    // in `break;` below is one usage case. Confirm against the full file.
    switch(pAllocationCreateInfo->usage)
    {
        break;
        if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
        {
            preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
        }
        break;
        requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        break;
        requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
        if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
        {
            preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
        }
        break;
        requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
        preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        break;
    default:
        break;
    }

    *pMemoryTypeIndex = UINT32_MAX;
    uint32_t minCost = UINT32_MAX;
    for(uint32_t memTypeIndex = 0, memTypeBit = 1;
        memTypeIndex < allocator->GetMemoryTypeCount();
        ++memTypeIndex, memTypeBit <<= 1)
    {
        // This memory type is acceptable according to memoryTypeBits bitmask.
        if((memTypeBit & memoryTypeBits) != 0)
        {
            const VkMemoryPropertyFlags currFlags =
                allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
            // This memory type contains requiredFlags.
            if((requiredFlags & ~currFlags) == 0)
            {
                // Calculate cost as number of bits from preferredFlags not present in this memory type.
                uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
                // Remember memory type with lowest cost.
                if(currCost < minCost)
                {
                    *pMemoryTypeIndex = memTypeIndex;
                    if(currCost == 0)
                    {
                        // Perfect match — no need to search further.
                        return VK_SUCCESS;
                    }
                    minCost = currCost;
                }
            }
        }
    }
    return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
}
    13553 
// NOTE(review): the signature line is missing from this excerpt; from the
// body this appears to be vmaFindMemoryTypeIndexForBufferInfo — confirm
// against the full file. Creates a temporary buffer just to query its
// memory requirements, runs vmaFindMemoryTypeIndex with them, then
// destroys the buffer.
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    const VkDevice hDev = allocator->m_hDevice;
    VkBuffer hBuffer = VK_NULL_HANDLE;
    VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
        hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    if(res == VK_SUCCESS)
    {
        VkMemoryRequirements memReq = {};
        allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
            hDev, hBuffer, &memReq);

        res = vmaFindMemoryTypeIndex(
            allocator,
            memReq.memoryTypeBits,
            pAllocationCreateInfo,
            pMemoryTypeIndex);

        // The temporary buffer is always destroyed, regardless of the result.
        allocator->GetVulkanFunctions().vkDestroyBuffer(
            hDev, hBuffer, allocator->GetAllocationCallbacks());
    }
    return res;
}
    13586 
// NOTE(review): the signature line is missing from this excerpt; from the
// body this appears to be vmaFindMemoryTypeIndexForImageInfo — confirm
// against the full file. Creates a temporary image just to query its
// memory requirements, runs vmaFindMemoryTypeIndex with them, then
// destroys the image.
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    const VkDevice hDev = allocator->m_hDevice;
    VkImage hImage = VK_NULL_HANDLE;
    VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
        hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    if(res == VK_SUCCESS)
    {
        VkMemoryRequirements memReq = {};
        allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
            hDev, hImage, &memReq);

        res = vmaFindMemoryTypeIndex(
            allocator,
            memReq.memoryTypeBits,
            pAllocationCreateInfo,
            pMemoryTypeIndex);

        // The temporary image is always destroyed, regardless of the result.
        allocator->GetVulkanFunctions().vkDestroyImage(
            hDev, hImage, allocator->GetAllocationCallbacks());
    }
    return res;
}
    13619 
    13620 VkResult vmaCreatePool(
    13621  VmaAllocator allocator,
    13622  const VmaPoolCreateInfo* pCreateInfo,
    13623  VmaPool* pPool)
    13624 {
    13625  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13626 
    13627  VMA_DEBUG_LOG("vmaCreatePool");
    13628 
    13629  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13630 
    13631  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13632 
    13633 #if VMA_RECORDING_ENABLED
    13634  if(allocator->GetRecorder() != VMA_NULL)
    13635  {
    13636  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13637  }
    13638 #endif
    13639 
    13640  return res;
    13641 }
    13642 
    13643 void vmaDestroyPool(
    13644  VmaAllocator allocator,
    13645  VmaPool pool)
    13646 {
    13647  VMA_ASSERT(allocator);
    13648 
    13649  if(pool == VK_NULL_HANDLE)
    13650  {
    13651  return;
    13652  }
    13653 
    13654  VMA_DEBUG_LOG("vmaDestroyPool");
    13655 
    13656  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13657 
    13658 #if VMA_RECORDING_ENABLED
    13659  if(allocator->GetRecorder() != VMA_NULL)
    13660  {
    13661  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13662  }
    13663 #endif
    13664 
    13665  allocator->DestroyPool(pool);
    13666 }
    13667 
    13668 void vmaGetPoolStats(
    13669  VmaAllocator allocator,
    13670  VmaPool pool,
    13671  VmaPoolStats* pPoolStats)
    13672 {
    13673  VMA_ASSERT(allocator && pool && pPoolStats);
    13674 
    13675  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13676 
    13677  allocator->GetPoolStats(pool, pPoolStats);
    13678 }
    13679 
// NOTE(review): the signature line is missing from this excerpt; from the
// body this appears to be vmaMakePoolAllocationsLost — confirm against the
// full file. Marks eligible allocations in the pool as lost and, when
// pLostAllocationCount is not null, reports how many were affected.
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
}
    13698 
    13699 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13700 {
    13701  VMA_ASSERT(allocator && pool);
    13702 
    13703  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13704 
    13705  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13706 
    13707  return allocator->CheckPoolCorruption(pool);
    13708 }
    13709 
    13710 VkResult vmaAllocateMemory(
    13711  VmaAllocator allocator,
    13712  const VkMemoryRequirements* pVkMemoryRequirements,
    13713  const VmaAllocationCreateInfo* pCreateInfo,
    13714  VmaAllocation* pAllocation,
    13715  VmaAllocationInfo* pAllocationInfo)
    13716 {
    13717  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13718 
    13719  VMA_DEBUG_LOG("vmaAllocateMemory");
    13720 
    13721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13722 
    13723  VkResult result = allocator->AllocateMemory(
    13724  *pVkMemoryRequirements,
    13725  false, // requiresDedicatedAllocation
    13726  false, // prefersDedicatedAllocation
    13727  VK_NULL_HANDLE, // dedicatedBuffer
    13728  VK_NULL_HANDLE, // dedicatedImage
    13729  *pCreateInfo,
    13730  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13731  pAllocation);
    13732 
    13733 #if VMA_RECORDING_ENABLED
    13734  if(allocator->GetRecorder() != VMA_NULL)
    13735  {
    13736  allocator->GetRecorder()->RecordAllocateMemory(
    13737  allocator->GetCurrentFrameIndex(),
    13738  *pVkMemoryRequirements,
    13739  *pCreateInfo,
    13740  *pAllocation);
    13741  }
    13742 #endif
    13743 
    13744  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13745  {
    13746  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13747  }
    13748 
    13749  return result;
    13750 }
    13751 
// NOTE(review): the signature line is missing from this excerpt; the debug
// log below identifies this as vmaAllocateMemoryForBuffer — confirm against
// the full file. Queries the buffer's memory requirements (including
// dedicated-allocation hints) and allocates matching memory; on success,
// optionally fills *pAllocationInfo.
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        buffer, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_BUFFER,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // Only query info for a successful allocation.
    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13802 
    13803 VkResult vmaAllocateMemoryForImage(
    13804  VmaAllocator allocator,
    13805  VkImage image,
    13806  const VmaAllocationCreateInfo* pCreateInfo,
    13807  VmaAllocation* pAllocation,
    13808  VmaAllocationInfo* pAllocationInfo)
    13809 {
    13810  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13811 
    13812  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    13813 
    13814  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13815 
    13816  VkMemoryRequirements vkMemReq = {};
    13817  bool requiresDedicatedAllocation = false;
    13818  bool prefersDedicatedAllocation = false;
    13819  allocator->GetImageMemoryRequirements(image, vkMemReq,
    13820  requiresDedicatedAllocation, prefersDedicatedAllocation);
    13821 
    13822  VkResult result = allocator->AllocateMemory(
    13823  vkMemReq,
    13824  requiresDedicatedAllocation,
    13825  prefersDedicatedAllocation,
    13826  VK_NULL_HANDLE, // dedicatedBuffer
    13827  image, // dedicatedImage
    13828  *pCreateInfo,
    13829  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    13830  pAllocation);
    13831 
    13832 #if VMA_RECORDING_ENABLED
    13833  if(allocator->GetRecorder() != VMA_NULL)
    13834  {
    13835  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    13836  allocator->GetCurrentFrameIndex(),
    13837  vkMemReq,
    13838  requiresDedicatedAllocation,
    13839  prefersDedicatedAllocation,
    13840  *pCreateInfo,
    13841  *pAllocation);
    13842  }
    13843 #endif
    13844 
    13845  if(pAllocationInfo && result == VK_SUCCESS)
    13846  {
    13847  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13848  }
    13849 
    13850  return result;
    13851 }
    13852 
    13853 void vmaFreeMemory(
    13854  VmaAllocator allocator,
    13855  VmaAllocation allocation)
    13856 {
    13857  VMA_ASSERT(allocator);
    13858 
    13859  if(allocation == VK_NULL_HANDLE)
    13860  {
    13861  return;
    13862  }
    13863 
    13864  VMA_DEBUG_LOG("vmaFreeMemory");
    13865 
    13866  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13867 
    13868 #if VMA_RECORDING_ENABLED
    13869  if(allocator->GetRecorder() != VMA_NULL)
    13870  {
    13871  allocator->GetRecorder()->RecordFreeMemory(
    13872  allocator->GetCurrentFrameIndex(),
    13873  allocation);
    13874  }
    13875 #endif
    13876 
    13877  allocator->FreeMemory(allocation);
    13878 }
    13879 
// NOTE(review): the signature line is missing from this excerpt; from the
// body this appears to be vmaGetAllocationInfo — confirm against the full
// file. Fills *pAllocationInfo with the allocation's current parameters.
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && allocation && pAllocationInfo);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordGetAllocationInfo(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->GetAllocationInfo(allocation, pAllocationInfo);
}
    13900 
    13901 VkBool32 vmaTouchAllocation(
    13902  VmaAllocator allocator,
    13903  VmaAllocation allocation)
    13904 {
    13905  VMA_ASSERT(allocator && allocation);
    13906 
    13907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13908 
    13909 #if VMA_RECORDING_ENABLED
    13910  if(allocator->GetRecorder() != VMA_NULL)
    13911  {
    13912  allocator->GetRecorder()->RecordTouchAllocation(
    13913  allocator->GetCurrentFrameIndex(),
    13914  allocation);
    13915  }
    13916 #endif
    13917 
    13918  return allocator->TouchAllocation(allocation);
    13919 }
    13920 
// NOTE(review): the signature line is missing from this excerpt; from the
// body this appears to be vmaSetAllocationUserData — confirm against the
// full file. Stores pUserData on the allocation; the change is recorded
// after it takes effect.
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocation->SetUserData(allocator, pUserData);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordSetAllocationUserData(
            allocator->GetCurrentFrameIndex(),
            allocation,
            pUserData);
    }
#endif
}
    13942 
// NOTE(review): the signature line is missing from this excerpt; from the
// body this appears to be vmaCreateLostAllocation — confirm against the
// full file. Creates a special allocation object that is permanently in
// the "lost" state and returns it in *pAllocation.
    VmaAllocator allocator,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(allocator && pAllocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK;

    allocator->CreateLostAllocation(pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreateLostAllocation(
            allocator->GetCurrentFrameIndex(),
            *pAllocation);
    }
#endif
}
    13962 
    13963 VkResult vmaMapMemory(
    13964  VmaAllocator allocator,
    13965  VmaAllocation allocation,
    13966  void** ppData)
    13967 {
    13968  VMA_ASSERT(allocator && allocation && ppData);
    13969 
    13970  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13971 
    13972  VkResult res = allocator->Map(allocation, ppData);
    13973 
    13974 #if VMA_RECORDING_ENABLED
    13975  if(allocator->GetRecorder() != VMA_NULL)
    13976  {
    13977  allocator->GetRecorder()->RecordMapMemory(
    13978  allocator->GetCurrentFrameIndex(),
    13979  allocation);
    13980  }
    13981 #endif
    13982 
    13983  return res;
    13984 }
    13985 
    13986 void vmaUnmapMemory(
    13987  VmaAllocator allocator,
    13988  VmaAllocation allocation)
    13989 {
    13990  VMA_ASSERT(allocator && allocation);
    13991 
    13992  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13993 
    13994 #if VMA_RECORDING_ENABLED
    13995  if(allocator->GetRecorder() != VMA_NULL)
    13996  {
    13997  allocator->GetRecorder()->RecordUnmapMemory(
    13998  allocator->GetCurrentFrameIndex(),
    13999  allocation);
    14000  }
    14001 #endif
    14002 
    14003  allocator->Unmap(allocation);
    14004 }
    14005 
    14006 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14007 {
    14008  VMA_ASSERT(allocator && allocation);
    14009 
    14010  VMA_DEBUG_LOG("vmaFlushAllocation");
    14011 
    14012  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14013 
    14014  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14015 
    14016 #if VMA_RECORDING_ENABLED
    14017  if(allocator->GetRecorder() != VMA_NULL)
    14018  {
    14019  allocator->GetRecorder()->RecordFlushAllocation(
    14020  allocator->GetCurrentFrameIndex(),
    14021  allocation, offset, size);
    14022  }
    14023 #endif
    14024 }
    14025 
    14026 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14027 {
    14028  VMA_ASSERT(allocator && allocation);
    14029 
    14030  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14031 
    14032  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14033 
    14034  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14035 
    14036 #if VMA_RECORDING_ENABLED
    14037  if(allocator->GetRecorder() != VMA_NULL)
    14038  {
    14039  allocator->GetRecorder()->RecordInvalidateAllocation(
    14040  allocator->GetCurrentFrameIndex(),
    14041  allocation, offset, size);
    14042  }
    14043 #endif
    14044 }
    14045 
    14046 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14047 {
    14048  VMA_ASSERT(allocator);
    14049 
    14050  VMA_DEBUG_LOG("vmaCheckCorruption");
    14051 
    14052  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14053 
    14054  return allocator->CheckCorruption(memoryTypeBits);
    14055 }
    14056 
    14057 VkResult vmaDefragment(
    14058  VmaAllocator allocator,
    14059  VmaAllocation* pAllocations,
    14060  size_t allocationCount,
    14061  VkBool32* pAllocationsChanged,
    14062  const VmaDefragmentationInfo *pDefragmentationInfo,
    14063  VmaDefragmentationStats* pDefragmentationStats)
    14064 {
    14065  VMA_ASSERT(allocator && pAllocations);
    14066 
    14067  VMA_DEBUG_LOG("vmaDefragment");
    14068 
    14069  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14070 
    14071  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14072 }
    14073 
    14074 VkResult vmaBindBufferMemory(
    14075  VmaAllocator allocator,
    14076  VmaAllocation allocation,
    14077  VkBuffer buffer)
    14078 {
    14079  VMA_ASSERT(allocator && allocation && buffer);
    14080 
    14081  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14082 
    14083  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14084 
    14085  return allocator->BindBufferMemory(allocation, buffer);
    14086 }
    14087 
    14088 VkResult vmaBindImageMemory(
    14089  VmaAllocator allocator,
    14090  VmaAllocation allocation,
    14091  VkImage image)
    14092 {
    14093  VMA_ASSERT(allocator && allocation && image);
    14094 
    14095  VMA_DEBUG_LOG("vmaBindImageMemory");
    14096 
    14097  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14098 
    14099  return allocator->BindImageMemory(allocation, image);
    14100 }
    14101 
    14102 VkResult vmaCreateBuffer(
    14103  VmaAllocator allocator,
    14104  const VkBufferCreateInfo* pBufferCreateInfo,
    14105  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14106  VkBuffer* pBuffer,
    14107  VmaAllocation* pAllocation,
    14108  VmaAllocationInfo* pAllocationInfo)
    14109 {
    14110  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14111 
    14112  if(pBufferCreateInfo->size == 0)
    14113  {
    14114  return VK_ERROR_VALIDATION_FAILED_EXT;
    14115  }
    14116 
    14117  VMA_DEBUG_LOG("vmaCreateBuffer");
    14118 
    14119  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14120 
    14121  *pBuffer = VK_NULL_HANDLE;
    14122  *pAllocation = VK_NULL_HANDLE;
    14123 
    14124  // 1. Create VkBuffer.
    14125  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14126  allocator->m_hDevice,
    14127  pBufferCreateInfo,
    14128  allocator->GetAllocationCallbacks(),
    14129  pBuffer);
    14130  if(res >= 0)
    14131  {
    14132  // 2. vkGetBufferMemoryRequirements.
    14133  VkMemoryRequirements vkMemReq = {};
    14134  bool requiresDedicatedAllocation = false;
    14135  bool prefersDedicatedAllocation = false;
    14136  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14137  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14138 
    14139  // Make sure alignment requirements for specific buffer usages reported
    14140  // in Physical Device Properties are included in alignment reported by memory requirements.
    14141  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14142  {
    14143  VMA_ASSERT(vkMemReq.alignment %
    14144  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14145  }
    14146  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14147  {
    14148  VMA_ASSERT(vkMemReq.alignment %
    14149  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14150  }
    14151  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14152  {
    14153  VMA_ASSERT(vkMemReq.alignment %
    14154  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14155  }
    14156 
    14157  // 3. Allocate memory using allocator.
    14158  res = allocator->AllocateMemory(
    14159  vkMemReq,
    14160  requiresDedicatedAllocation,
    14161  prefersDedicatedAllocation,
    14162  *pBuffer, // dedicatedBuffer
    14163  VK_NULL_HANDLE, // dedicatedImage
    14164  *pAllocationCreateInfo,
    14165  VMA_SUBALLOCATION_TYPE_BUFFER,
    14166  pAllocation);
    14167 
    14168 #if VMA_RECORDING_ENABLED
    14169  if(allocator->GetRecorder() != VMA_NULL)
    14170  {
    14171  allocator->GetRecorder()->RecordCreateBuffer(
    14172  allocator->GetCurrentFrameIndex(),
    14173  *pBufferCreateInfo,
    14174  *pAllocationCreateInfo,
    14175  *pAllocation);
    14176  }
    14177 #endif
    14178 
    14179  if(res >= 0)
    14180  {
    14181  // 3. Bind buffer with memory.
    14182  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14183  if(res >= 0)
    14184  {
    14185  // All steps succeeded.
    14186  #if VMA_STATS_STRING_ENABLED
    14187  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14188  #endif
    14189  if(pAllocationInfo != VMA_NULL)
    14190  {
    14191  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14192  }
    14193 
    14194  return VK_SUCCESS;
    14195  }
    14196  allocator->FreeMemory(*pAllocation);
    14197  *pAllocation = VK_NULL_HANDLE;
    14198  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14199  *pBuffer = VK_NULL_HANDLE;
    14200  return res;
    14201  }
    14202  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14203  *pBuffer = VK_NULL_HANDLE;
    14204  return res;
    14205  }
    14206  return res;
    14207 }
    14208 
    14209 void vmaDestroyBuffer(
    14210  VmaAllocator allocator,
    14211  VkBuffer buffer,
    14212  VmaAllocation allocation)
    14213 {
    14214  VMA_ASSERT(allocator);
    14215 
    14216  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14217  {
    14218  return;
    14219  }
    14220 
    14221  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14222 
    14223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14224 
    14225 #if VMA_RECORDING_ENABLED
    14226  if(allocator->GetRecorder() != VMA_NULL)
    14227  {
    14228  allocator->GetRecorder()->RecordDestroyBuffer(
    14229  allocator->GetCurrentFrameIndex(),
    14230  allocation);
    14231  }
    14232 #endif
    14233 
    14234  if(buffer != VK_NULL_HANDLE)
    14235  {
    14236  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14237  }
    14238 
    14239  if(allocation != VK_NULL_HANDLE)
    14240  {
    14241  allocator->FreeMemory(allocation);
    14242  }
    14243 }
    14244 
    14245 VkResult vmaCreateImage(
    14246  VmaAllocator allocator,
    14247  const VkImageCreateInfo* pImageCreateInfo,
    14248  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14249  VkImage* pImage,
    14250  VmaAllocation* pAllocation,
    14251  VmaAllocationInfo* pAllocationInfo)
    14252 {
    14253  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14254 
    14255  if(pImageCreateInfo->extent.width == 0 ||
    14256  pImageCreateInfo->extent.height == 0 ||
    14257  pImageCreateInfo->extent.depth == 0 ||
    14258  pImageCreateInfo->mipLevels == 0 ||
    14259  pImageCreateInfo->arrayLayers == 0)
    14260  {
    14261  return VK_ERROR_VALIDATION_FAILED_EXT;
    14262  }
    14263 
    14264  VMA_DEBUG_LOG("vmaCreateImage");
    14265 
    14266  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14267 
    14268  *pImage = VK_NULL_HANDLE;
    14269  *pAllocation = VK_NULL_HANDLE;
    14270 
    14271  // 1. Create VkImage.
    14272  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14273  allocator->m_hDevice,
    14274  pImageCreateInfo,
    14275  allocator->GetAllocationCallbacks(),
    14276  pImage);
    14277  if(res >= 0)
    14278  {
    14279  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14280  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14281  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14282 
    14283  // 2. Allocate memory using allocator.
    14284  VkMemoryRequirements vkMemReq = {};
    14285  bool requiresDedicatedAllocation = false;
    14286  bool prefersDedicatedAllocation = false;
    14287  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14288  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14289 
    14290  res = allocator->AllocateMemory(
    14291  vkMemReq,
    14292  requiresDedicatedAllocation,
    14293  prefersDedicatedAllocation,
    14294  VK_NULL_HANDLE, // dedicatedBuffer
    14295  *pImage, // dedicatedImage
    14296  *pAllocationCreateInfo,
    14297  suballocType,
    14298  pAllocation);
    14299 
    14300 #if VMA_RECORDING_ENABLED
    14301  if(allocator->GetRecorder() != VMA_NULL)
    14302  {
    14303  allocator->GetRecorder()->RecordCreateImage(
    14304  allocator->GetCurrentFrameIndex(),
    14305  *pImageCreateInfo,
    14306  *pAllocationCreateInfo,
    14307  *pAllocation);
    14308  }
    14309 #endif
    14310 
    14311  if(res >= 0)
    14312  {
    14313  // 3. Bind image with memory.
    14314  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14315  if(res >= 0)
    14316  {
    14317  // All steps succeeded.
    14318  #if VMA_STATS_STRING_ENABLED
    14319  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14320  #endif
    14321  if(pAllocationInfo != VMA_NULL)
    14322  {
    14323  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14324  }
    14325 
    14326  return VK_SUCCESS;
    14327  }
    14328  allocator->FreeMemory(*pAllocation);
    14329  *pAllocation = VK_NULL_HANDLE;
    14330  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14331  *pImage = VK_NULL_HANDLE;
    14332  return res;
    14333  }
    14334  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14335  *pImage = VK_NULL_HANDLE;
    14336  return res;
    14337  }
    14338  return res;
    14339 }
    14340 
    14341 void vmaDestroyImage(
    14342  VmaAllocator allocator,
    14343  VkImage image,
    14344  VmaAllocation allocation)
    14345 {
    14346  VMA_ASSERT(allocator);
    14347 
    14348  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14349  {
    14350  return;
    14351  }
    14352 
    14353  VMA_DEBUG_LOG("vmaDestroyImage");
    14354 
    14355  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14356 
    14357 #if VMA_RECORDING_ENABLED
    14358  if(allocator->GetRecorder() != VMA_NULL)
    14359  {
    14360  allocator->GetRecorder()->RecordDestroyImage(
    14361  allocator->GetCurrentFrameIndex(),
    14362  allocation);
    14363  }
    14364 #endif
    14365 
    14366  if(image != VK_NULL_HANDLE)
    14367  {
    14368  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14369  }
    14370  if(allocation != VK_NULL_HANDLE)
    14371  {
    14372  allocator->FreeMemory(allocation);
    14373  }
    14374 }
    14375 
    14376 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1584
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1885
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1632
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1641
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    -
    Definition: vk_mem_alloc.h:1606
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2198
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1587
    +
    Definition: vk_mem_alloc.h:1615
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2207
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1596
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:1833
    -
    Definition: vk_mem_alloc.h:1936
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1579
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2298
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1629
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2543
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2087
    -
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1476
    +
    Definition: vk_mem_alloc.h:1842
    +
    Definition: vk_mem_alloc.h:1945
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1588
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2307
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1638
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2552
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2096
    +
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1485
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2179
    -
    Definition: vk_mem_alloc.h:1913
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1568
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1986
    -
    Definition: vk_mem_alloc.h:1860
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1641
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2115
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2188
    +
    Definition: vk_mem_alloc.h:1922
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1577
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1995
    +
    Definition: vk_mem_alloc.h:1869
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1650
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2124
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1694
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1626
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1703
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1635
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1864
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1873
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1766
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1584
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1765
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2547
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1775
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1593
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1774
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2556
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1658
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1775
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2555
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1970
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2538
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1585
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1510
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1667
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1784
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2564
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1979
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2547
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1594
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1519
    Represents main object of this library initialized.
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1635
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1644
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2129
    -
    Definition: vk_mem_alloc.h:2123
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1701
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2308
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2138
    +
    Definition: vk_mem_alloc.h:2132
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1710
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2317
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1580
    -
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1604
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2007
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2149
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2185
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1589
    +
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1613
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2016
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2158
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2194
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:1566
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2132
    +
    Definition: vk_mem_alloc.h:1575
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2141
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1811
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1820
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2533
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2542
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2551
    -
    Definition: vk_mem_alloc.h:1850
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:1994
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1583
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2560
    +
    Definition: vk_mem_alloc.h:1859
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2003
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1592
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1771
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1516
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1780
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1525
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    - +
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1537
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1546
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1608
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1542
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2553
    +
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1617
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1551
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2562
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1981
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2195
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1990
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2204
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1576
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1754
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2144
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1529
    -
    Definition: vk_mem_alloc.h:2119
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1585
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1763
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2153
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1538
    +
    Definition: vk_mem_alloc.h:2128
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1920
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1767
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1533
    -
    Definition: vk_mem_alloc.h:1944
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2135
    -
    Definition: vk_mem_alloc.h:1859
    -
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1582
    +
    Definition: vk_mem_alloc.h:1929
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1776
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1542
    +
    Definition: vk_mem_alloc.h:1953
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2144
    +
    Definition: vk_mem_alloc.h:1868
    +
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1591
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1976
    -
    Definition: vk_mem_alloc.h:1967
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1985
    +
    Definition: vk_mem_alloc.h:1976
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1757
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1578
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2157
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1644
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2188
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1965
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2000
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1766
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1587
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2166
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1653
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2197
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1974
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2009
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1682
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1773
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1900
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1766
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1691
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1782
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1909
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1775
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1589
    -
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1614
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1531
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1588
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1598
    +
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1623
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1540
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1597
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2171
    -
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1581
    -
    Definition: vk_mem_alloc.h:1931
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2180
    +
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1590
    +
    Definition: vk_mem_alloc.h:1940
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    -
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1622
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2322
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1638
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1766
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1763
    +
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1631
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2331
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1647
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1775
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1772
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2176
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2185
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions...
    -
    Definition: vk_mem_alloc.h:1940
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2303
    -
    Definition: vk_mem_alloc.h:1951
    -
    Definition: vk_mem_alloc.h:1963
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2549
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1574
    +
    Definition: vk_mem_alloc.h:1949
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2312
    +
    Definition: vk_mem_alloc.h:1960
    +
    Definition: vk_mem_alloc.h:1972
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2558
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1583
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1761
    -
    Definition: vk_mem_alloc.h:1816
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2125
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1770
    +
    Definition: vk_mem_alloc.h:1825
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2134
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1611
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1759
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1586
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1590
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1887
    -
    Definition: vk_mem_alloc.h:1958
    -
    Definition: vk_mem_alloc.h:1843
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2317
    +
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1620
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1768
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1595
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1599
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1896
    +
    Definition: vk_mem_alloc.h:1967
    +
    Definition: vk_mem_alloc.h:1852
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2326
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1564
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1573
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1577
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2104
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2284
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1586
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2113
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2293
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1948
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2069
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1767
    +
    Definition: vk_mem_alloc.h:1957
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2078
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1776
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
    - -
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1598
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1774
    + +
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1607
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1783
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2182
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1767
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2191
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1776
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2289
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2298
    + + +

    ◆ vmaResizeAllocation()

    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + +
    VkResult vmaResizeAllocation (VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize 
    )
    +
    + +

    Tries to resize an allocation in place, if there is enough free memory after it.

    +

    Tries to change allocation's size without moving or reallocating it. You can both shrink and grow allocation size. When growing, it succeeds only when the allocation belongs to a memory block with enough free space after it.

    +

    Returns VK_SUCCESS if allocation's size has been successfully changed. Returns VK_ERROR_OUT_OF_POOL_MEMORY if allocation's size could not be changed.

    +

    After successful call to this function, VmaAllocationInfo::size of this allocation changes. All other parameters stay the same: memory pool and type, alignment, offset, mapped pointer.

    +
      +
    • Calling this function on allocation that is in lost state fails with result VK_ERROR_VALIDATION_FAILED_EXT.
    • +
    • Calling this function with newSize same as current allocation size does nothing and returns VK_SUCCESS.
    • +
    • Resizing dedicated allocations, as well as allocations created in pools that use linear or buddy algorithm, is not supported. The function returns VK_ERROR_FEATURE_NOT_PRESENT in such cases. Support may be added in the future.
    • +
    +
    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index b5e44ad..3e42ce3 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,7 +65,7 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1477 /*
    1478 Define this macro to 0/1 to disable/enable support for recording functionality,
    1479 available through VmaAllocatorCreateInfo::pRecordSettings.
    1480 */
    1481 #ifndef VMA_RECORDING_ENABLED
    1482  #ifdef _WIN32
    1483  #define VMA_RECORDING_ENABLED 1
    1484  #else
    1485  #define VMA_RECORDING_ENABLED 0
    1486  #endif
    1487 #endif
    1488 
    1489 #ifndef NOMINMAX
    1490  #define NOMINMAX // For windows.h
    1491 #endif
    1492 
    1493 #include <vulkan/vulkan.h>
    1494 
    1495 #if VMA_RECORDING_ENABLED
    1496  #include <windows.h>
    1497 #endif
    1498 
    1499 #if !defined(VMA_DEDICATED_ALLOCATION)
    1500  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1501  #define VMA_DEDICATED_ALLOCATION 1
    1502  #else
    1503  #define VMA_DEDICATED_ALLOCATION 0
    1504  #endif
    1505 #endif
    1506 
    1516 VK_DEFINE_HANDLE(VmaAllocator)
    1517 
    1518 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1520  VmaAllocator allocator,
    1521  uint32_t memoryType,
    1522  VkDeviceMemory memory,
    1523  VkDeviceSize size);
    1525 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1526  VmaAllocator allocator,
    1527  uint32_t memoryType,
    1528  VkDeviceMemory memory,
    1529  VkDeviceSize size);
    1530 
    1544 
    1574 
    1577 typedef VkFlags VmaAllocatorCreateFlags;
    1578 
    1583 typedef struct VmaVulkanFunctions {
    1584  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1585  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1586  PFN_vkAllocateMemory vkAllocateMemory;
    1587  PFN_vkFreeMemory vkFreeMemory;
    1588  PFN_vkMapMemory vkMapMemory;
    1589  PFN_vkUnmapMemory vkUnmapMemory;
    1590  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1591  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1592  PFN_vkBindBufferMemory vkBindBufferMemory;
    1593  PFN_vkBindImageMemory vkBindImageMemory;
    1594  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1595  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1596  PFN_vkCreateBuffer vkCreateBuffer;
    1597  PFN_vkDestroyBuffer vkDestroyBuffer;
    1598  PFN_vkCreateImage vkCreateImage;
    1599  PFN_vkDestroyImage vkDestroyImage;
    1600 #if VMA_DEDICATED_ALLOCATION
    1601  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1602  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1603 #endif
    1605 
    1607 typedef enum VmaRecordFlagBits {
    1614 
    1617 typedef VkFlags VmaRecordFlags;
    1618 
    1620 typedef struct VmaRecordSettings
    1621 {
    1631  const char* pFilePath;
    1633 
    1636 {
    1640 
    1641  VkPhysicalDevice physicalDevice;
    1643 
    1644  VkDevice device;
    1646 
    1649 
    1650  const VkAllocationCallbacks* pAllocationCallbacks;
    1652 
    1691  const VkDeviceSize* pHeapSizeLimit;
    1712 
    1714 VkResult vmaCreateAllocator(
    1715  const VmaAllocatorCreateInfo* pCreateInfo,
    1716  VmaAllocator* pAllocator);
    1717 
    1719 void vmaDestroyAllocator(
    1720  VmaAllocator allocator);
    1721 
    1727  VmaAllocator allocator,
    1728  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1729 
    1735  VmaAllocator allocator,
    1736  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1737 
    1745  VmaAllocator allocator,
    1746  uint32_t memoryTypeIndex,
    1747  VkMemoryPropertyFlags* pFlags);
    1748 
    1758  VmaAllocator allocator,
    1759  uint32_t frameIndex);
    1760 
    1763 typedef struct VmaStatInfo
    1764 {
    1766  uint32_t blockCount;
    1772  VkDeviceSize usedBytes;
    1774  VkDeviceSize unusedBytes;
    1777 } VmaStatInfo;
    1778 
    1780 typedef struct VmaStats
    1781 {
    1782  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1783  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1785 } VmaStats;
    1786 
    1788 void vmaCalculateStats(
    1789  VmaAllocator allocator,
    1790  VmaStats* pStats);
    1791 
    1792 #define VMA_STATS_STRING_ENABLED 1
    1793 
    1794 #if VMA_STATS_STRING_ENABLED
    1795 
    1797 
    1799 void vmaBuildStatsString(
    1800  VmaAllocator allocator,
    1801  char** ppStatsString,
    1802  VkBool32 detailedMap);
    1803 
    1804 void vmaFreeStatsString(
    1805  VmaAllocator allocator,
    1806  char* pStatsString);
    1807 
    1808 #endif // #if VMA_STATS_STRING_ENABLED
    1809 
    1818 VK_DEFINE_HANDLE(VmaPool)
    1819 
    1820 typedef enum VmaMemoryUsage
    1821 {
    1870 } VmaMemoryUsage;
    1871 
    1886 
    1941 
    1954 
    1964 
    1971 
    1975 
    1977 {
    1990  VkMemoryPropertyFlags requiredFlags;
    1995  VkMemoryPropertyFlags preferredFlags;
    2003  uint32_t memoryTypeBits;
    2016  void* pUserData;
    2018 
    2035 VkResult vmaFindMemoryTypeIndex(
    2036  VmaAllocator allocator,
    2037  uint32_t memoryTypeBits,
    2038  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2039  uint32_t* pMemoryTypeIndex);
    2040 
    2054  VmaAllocator allocator,
    2055  const VkBufferCreateInfo* pBufferCreateInfo,
    2056  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2057  uint32_t* pMemoryTypeIndex);
    2058 
    2072  VmaAllocator allocator,
    2073  const VkImageCreateInfo* pImageCreateInfo,
    2074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2075  uint32_t* pMemoryTypeIndex);
    2076 
    2097 
    2114 
    2125 
    2131 
    2134 typedef VkFlags VmaPoolCreateFlags;
    2135 
    2138 typedef struct VmaPoolCreateInfo {
    2153  VkDeviceSize blockSize;
    2182 
    2185 typedef struct VmaPoolStats {
    2188  VkDeviceSize size;
    2191  VkDeviceSize unusedSize;
    2204  VkDeviceSize unusedRangeSizeMax;
    2207  size_t blockCount;
    2208 } VmaPoolStats;
    2209 
    2216 VkResult vmaCreatePool(
    2217  VmaAllocator allocator,
    2218  const VmaPoolCreateInfo* pCreateInfo,
    2219  VmaPool* pPool);
    2220 
    2223 void vmaDestroyPool(
    2224  VmaAllocator allocator,
    2225  VmaPool pool);
    2226 
    2233 void vmaGetPoolStats(
    2234  VmaAllocator allocator,
    2235  VmaPool pool,
    2236  VmaPoolStats* pPoolStats);
    2237 
    2245  VmaAllocator allocator,
    2246  VmaPool pool,
    2247  size_t* pLostAllocationCount);
    2248 
    2263 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2264 
    2289 VK_DEFINE_HANDLE(VmaAllocation)
    2290 
    2291 
    2293 typedef struct VmaAllocationInfo {
    2298  uint32_t memoryType;
    2307  VkDeviceMemory deviceMemory;
    2312  VkDeviceSize offset;
    2317  VkDeviceSize size;
    2331  void* pUserData;
    2333 
    2344 VkResult vmaAllocateMemory(
    2345  VmaAllocator allocator,
    2346  const VkMemoryRequirements* pVkMemoryRequirements,
    2347  const VmaAllocationCreateInfo* pCreateInfo,
    2348  VmaAllocation* pAllocation,
    2349  VmaAllocationInfo* pAllocationInfo);
    2350 
    2358  VmaAllocator allocator,
    2359  VkBuffer buffer,
    2360  const VmaAllocationCreateInfo* pCreateInfo,
    2361  VmaAllocation* pAllocation,
    2362  VmaAllocationInfo* pAllocationInfo);
    2363 
    2365 VkResult vmaAllocateMemoryForImage(
    2366  VmaAllocator allocator,
    2367  VkImage image,
    2368  const VmaAllocationCreateInfo* pCreateInfo,
    2369  VmaAllocation* pAllocation,
    2370  VmaAllocationInfo* pAllocationInfo);
    2371 
    2373 void vmaFreeMemory(
    2374  VmaAllocator allocator,
    2375  VmaAllocation allocation);
    2376 
    2394  VmaAllocator allocator,
    2395  VmaAllocation allocation,
    2396  VmaAllocationInfo* pAllocationInfo);
    2397 
    2412 VkBool32 vmaTouchAllocation(
    2413  VmaAllocator allocator,
    2414  VmaAllocation allocation);
    2415 
    2430  VmaAllocator allocator,
    2431  VmaAllocation allocation,
    2432  void* pUserData);
    2433 
    2445  VmaAllocator allocator,
    2446  VmaAllocation* pAllocation);
    2447 
    2482 VkResult vmaMapMemory(
    2483  VmaAllocator allocator,
    2484  VmaAllocation allocation,
    2485  void** ppData);
    2486 
    2491 void vmaUnmapMemory(
    2492  VmaAllocator allocator,
    2493  VmaAllocation allocation);
    2494 
    2507 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2508 
    2521 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2522 
    2539 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2540 
    2542 typedef struct VmaDefragmentationInfo {
    2547  VkDeviceSize maxBytesToMove;
    2554 
    2556 typedef struct VmaDefragmentationStats {
    2558  VkDeviceSize bytesMoved;
    2560  VkDeviceSize bytesFreed;
    2566 
    2605 VkResult vmaDefragment(
    2606  VmaAllocator allocator,
    2607  VmaAllocation* pAllocations,
    2608  size_t allocationCount,
    2609  VkBool32* pAllocationsChanged,
    2610  const VmaDefragmentationInfo *pDefragmentationInfo,
    2611  VmaDefragmentationStats* pDefragmentationStats);
    2612 
    2625 VkResult vmaBindBufferMemory(
    2626  VmaAllocator allocator,
    2627  VmaAllocation allocation,
    2628  VkBuffer buffer);
    2629 
    2642 VkResult vmaBindImageMemory(
    2643  VmaAllocator allocator,
    2644  VmaAllocation allocation,
    2645  VkImage image);
    2646 
    2673 VkResult vmaCreateBuffer(
    2674  VmaAllocator allocator,
    2675  const VkBufferCreateInfo* pBufferCreateInfo,
    2676  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2677  VkBuffer* pBuffer,
    2678  VmaAllocation* pAllocation,
    2679  VmaAllocationInfo* pAllocationInfo);
    2680 
    2692 void vmaDestroyBuffer(
    2693  VmaAllocator allocator,
    2694  VkBuffer buffer,
    2695  VmaAllocation allocation);
    2696 
    2698 VkResult vmaCreateImage(
    2699  VmaAllocator allocator,
    2700  const VkImageCreateInfo* pImageCreateInfo,
    2701  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2702  VkImage* pImage,
    2703  VmaAllocation* pAllocation,
    2704  VmaAllocationInfo* pAllocationInfo);
    2705 
    2717 void vmaDestroyImage(
    2718  VmaAllocator allocator,
    2719  VkImage image,
    2720  VmaAllocation allocation);
    2721 
    2722 #ifdef __cplusplus
    2723 }
    2724 #endif
    2725 
    2726 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2727 
    2728 // For Visual Studio IntelliSense.
    2729 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2730 #define VMA_IMPLEMENTATION
    2731 #endif
    2732 
    2733 #ifdef VMA_IMPLEMENTATION
    2734 #undef VMA_IMPLEMENTATION
    2735 
    2736 #include <cstdint>
    2737 #include <cstdlib>
    2738 #include <cstring>
    2739 
    2740 /*******************************************************************************
    2741 CONFIGURATION SECTION
    2742 
    2743 Define some of these macros before each #include of this header or change them
    2744 here if you need other then default behavior depending on your environment.
    2745 */
    2746 
    2747 /*
    2748 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2749 internally, like:
    2750 
    2751  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2752 
    2753 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2754 VmaAllocatorCreateInfo::pVulkanFunctions.
    2755 */
    2756 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2757 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2758 #endif
    2759 
    2760 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2761 //#define VMA_USE_STL_CONTAINERS 1
    2762 
    2763 /* Set this macro to 1 to make the library including and using STL containers:
    2764 std::pair, std::vector, std::list, std::unordered_map.
    2765 
    2766 Set it to 0 or undefined to make the library using its own implementation of
    2767 the containers.
    2768 */
    2769 #if VMA_USE_STL_CONTAINERS
    2770  #define VMA_USE_STL_VECTOR 1
    2771  #define VMA_USE_STL_UNORDERED_MAP 1
    2772  #define VMA_USE_STL_LIST 1
    2773 #endif
    2774 
    2775 #if VMA_USE_STL_VECTOR
    2776  #include <vector>
    2777 #endif
    2778 
    2779 #if VMA_USE_STL_UNORDERED_MAP
    2780  #include <unordered_map>
    2781 #endif
    2782 
    2783 #if VMA_USE_STL_LIST
    2784  #include <list>
    2785 #endif
    2786 
    2787 /*
    2788 Following headers are used in this CONFIGURATION section only, so feel free to
    2789 remove them if not needed.
    2790 */
    2791 #include <cassert> // for assert
    2792 #include <algorithm> // for min, max
    2793 #include <mutex> // for std::mutex
    2794 #include <atomic> // for std::atomic
    2795 
    2796 #ifndef VMA_NULL
    2797  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2798  #define VMA_NULL nullptr
    2799 #endif
    2800 
    2801 #if defined(__APPLE__) || defined(__ANDROID__)
    2802 #include <cstdlib>
// Fallback implementation of C11 aligned_alloc() for platforms that do not
// provide it (Apple / Android). Wraps posix_memalign().
void *aligned_alloc(size_t alignment, size_t size)
{
    // posix_memalign requires alignment to be >= sizeof(void*).
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL; // Allocation failed.
}
    2816 #endif
    2817 
    2818 // If your compiler is not compatible with C++11 and definition of
    2819 // aligned_alloc() function is missing, uncommeting following line may help:
    2820 
    2821 //#include <malloc.h>
    2822 
    2823 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2824 #ifndef VMA_ASSERT
    2825  #ifdef _DEBUG
    2826  #define VMA_ASSERT(expr) assert(expr)
    2827  #else
    2828  #define VMA_ASSERT(expr)
    2829  #endif
    2830 #endif
    2831 
    2832 // Assert that will be called very often, like inside data structures e.g. operator[].
    2833 // Making it non-empty can make program slow.
    2834 #ifndef VMA_HEAVY_ASSERT
    2835  #ifdef _DEBUG
    2836  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2837  #else
    2838  #define VMA_HEAVY_ASSERT(expr)
    2839  #endif
    2840 #endif
    2841 
    2842 #ifndef VMA_ALIGN_OF
    2843  #define VMA_ALIGN_OF(type) (__alignof(type))
    2844 #endif
    2845 
    2846 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2847  #if defined(_WIN32)
    2848  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2849  #else
    2850  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2851  #endif
    2852 #endif
    2853 
    2854 #ifndef VMA_SYSTEM_FREE
    2855  #if defined(_WIN32)
    2856  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2857  #else
    2858  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2859  #endif
    2860 #endif
    2861 
    2862 #ifndef VMA_MIN
    2863  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2864 #endif
    2865 
    2866 #ifndef VMA_MAX
    2867  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2868 #endif
    2869 
    2870 #ifndef VMA_SWAP
    2871  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2872 #endif
    2873 
    2874 #ifndef VMA_SORT
    2875  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2876 #endif
    2877 
    2878 #ifndef VMA_DEBUG_LOG
    2879  #define VMA_DEBUG_LOG(format, ...)
    2880  /*
    2881  #define VMA_DEBUG_LOG(format, ...) do { \
    2882  printf(format, __VA_ARGS__); \
    2883  printf("\n"); \
    2884  } while(false)
    2885  */
    2886 #endif
    2887 
    2888 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2889 #if VMA_STATS_STRING_ENABLED
    2890  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2891  {
    2892  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2893  }
    2894  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2895  {
    2896  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2897  }
    2898  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2899  {
    2900  snprintf(outStr, strLen, "%p", ptr);
    2901  }
    2902 #endif
    2903 
    2904 #ifndef VMA_MUTEX
    // Default mutex implementation: thin wrapper over std::mutex.
    // Replace by defining VMA_MUTEX before including this file.
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    2915  #define VMA_MUTEX VmaMutex
    2916 #endif
    2917 
    2918 /*
    2919 If providing your own implementation, you need to implement a subset of std::atomic:
    2920 
    2921 - Constructor(uint32_t desired)
    2922 - uint32_t load() const
    2923 - void store(uint32_t desired)
    2924 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2925 */
    2926 #ifndef VMA_ATOMIC_UINT32
    2927  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2928 #endif
    2929 
    2930 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2931 
    2935  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2936 #endif
    2937 
    2938 #ifndef VMA_DEBUG_ALIGNMENT
    2939 
    2943  #define VMA_DEBUG_ALIGNMENT (1)
    2944 #endif
    2945 
    2946 #ifndef VMA_DEBUG_MARGIN
    2947 
    2951  #define VMA_DEBUG_MARGIN (0)
    2952 #endif
    2953 
    2954 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2955 
    2959  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2960 #endif
    2961 
    2962 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    2963 
    2968  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    2969 #endif
    2970 
    2971 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    2972 
    2976  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    2977 #endif
    2978 
    2979 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    2980 
    2984  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    2985 #endif
    2986 
    2987 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    2988  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    2990 #endif
    2991 
    2992 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    2993  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    2995 #endif
    2996 
    2997 #ifndef VMA_CLASS_NO_COPY
    2998  #define VMA_CLASS_NO_COPY(className) \
    2999  private: \
    3000  className(const className&) = delete; \
    3001  className& operator=(const className&) = delete;
    3002 #endif
    3003 
    3004 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3005 
    3006 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3007 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3008 
    3009 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3010 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3011 
    3012 /*******************************************************************************
    3013 END OF CONFIGURATION
    3014 */
    3015 
    3016 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3017  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3018 
    3019 // Returns number of bits set to 1 in (v).
    3020 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3021 {
    3022  uint32_t c = v - ((v >> 1) & 0x55555555);
    3023  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3024  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3025  c = ((c >> 8) + c) & 0x00FF00FF;
    3026  c = ((c >> 16) + c) & 0x0000FFFF;
    3027  return c;
    3028 }
    3029 
    3030 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3031 // Use types like uint32_t, uint64_t as T.
    3032 template <typename T>
    3033 static inline T VmaAlignUp(T val, T align)
    3034 {
    3035  return (val + align - 1) / align * align;
    3036 }
    3037 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3038 // Use types like uint32_t, uint64_t as T.
    3039 template <typename T>
    3040 static inline T VmaAlignDown(T val, T align)
    3041 {
    3042  return val / align * align;
    3043 }
    3044 
    3045 // Division with mathematical rounding to nearest number.
    3046 template <typename T>
    3047 static inline T VmaRoundDiv(T x, T y)
    3048 {
    3049  return (x + (y / (T)2)) / y;
    3050 }
    3051 
    3052 /*
    3053 Returns true if given number is a power of two.
    3054 T must be unsigned integer number or signed integer but always nonnegative.
    3055 For 0 returns true.
    3056 */
    3057 template <typename T>
    3058 inline bool VmaIsPow2(T x)
    3059 {
    3060  return (x & (x-1)) == 0;
    3061 }
    3062 
    3063 // Returns smallest power of 2 greater or equal to v.
    3064 static inline uint32_t VmaNextPow2(uint32_t v)
    3065 {
    3066  v--;
    3067  v |= v >> 1;
    3068  v |= v >> 2;
    3069  v |= v >> 4;
    3070  v |= v >> 8;
    3071  v |= v >> 16;
    3072  v++;
    3073  return v;
    3074 }
    3075 static inline uint64_t VmaNextPow2(uint64_t v)
    3076 {
    3077  v--;
    3078  v |= v >> 1;
    3079  v |= v >> 2;
    3080  v |= v >> 4;
    3081  v |= v >> 8;
    3082  v |= v >> 16;
    3083  v |= v >> 32;
    3084  v++;
    3085  return v;
    3086 }
    3087 
    3088 // Returns largest power of 2 less or equal to v.
    3089 static inline uint32_t VmaPrevPow2(uint32_t v)
    3090 {
    3091  v |= v >> 1;
    3092  v |= v >> 2;
    3093  v |= v >> 4;
    3094  v |= v >> 8;
    3095  v |= v >> 16;
    3096  v = v ^ (v >> 1);
    3097  return v;
    3098 }
    3099 static inline uint64_t VmaPrevPow2(uint64_t v)
    3100 {
    3101  v |= v >> 1;
    3102  v |= v >> 2;
    3103  v |= v >> 4;
    3104  v |= v >> 8;
    3105  v |= v >> 16;
    3106  v |= v >> 32;
    3107  v = v ^ (v >> 1);
    3108  return v;
    3109 }
    3110 
    3111 static inline bool VmaStrIsEmpty(const char* pStr)
    3112 {
    3113  return pStr == VMA_NULL || *pStr == '\0';
    3114 }
    3115 
    3116 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3117 {
    3118  switch(algorithm)
    3119  {
    3121  return "Linear";
    3123  return "Buddy";
    3124  case 0:
    3125  return "Default";
    3126  default:
    3127  VMA_ASSERT(0);
    3128  return "";
    3129  }
    3130 }
    3131 
    3132 #ifndef VMA_SORT
    3133 
// Lomuto partition step used by VmaQuickSort below.
// Partitions [beg, end) around the last element (the pivot): every element
// for which cmp(elem, pivot) holds is moved before the returned position,
// the pivot is swapped into that position, and an iterator to it is returned.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;  // Pivot = last element.
    Iterator insertIndex = beg;                 // Next slot for a "less" element.
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Place the pivot between the two partitions.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
    3156 
    3157 template<typename Iterator, typename Compare>
    3158 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3159 {
    3160  if(beg < end)
    3161  {
    3162  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3163  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3164  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3165  }
    3166 }
    3167 
    3168 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3169 
    3170 #endif // #ifndef VMA_SORT
    3171 
/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be in less memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    // NOTE: the ~(pageSize - 1) mask is only a valid page-rounding when
    // pageSize is a power of 2 -- assumed here (bufferImageGranularity);
    // the assert above only checks pageSize > 0.
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; // Last byte of A.
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);  // Page containing A's last byte.
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); // Page containing B's first byte.
    return resourceAEndPage == resourceBStartPage;
}
    3192 
// Kind of content stored in a suballocation. Used by
// VmaIsBufferImageGranularityConflict below to decide whether two neighboring
// suballocations must respect bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // Unused region.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // Content unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // Image with tiling not known.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3203 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Normalize order so only the upper triangle of the type/type matrix
    // needs to be handled below.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false; // Free space never conflicts.
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;  // Unknown content: assume the worst.
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false; // Optimal vs optimal: no conflict.
    default:
        VMA_ASSERT(0);
        return true;
    }
}
    3244 
// Fills the VMA_DEBUG_MARGIN bytes at (pData + offset) with the
// corruption-detection magic value, written one uint32 word at a time.
static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
}
    3254 
// Returns true if the margin at (pData + offset) still holds the magic value
// written by VmaWriteMagicValue, i.e. no buffer overrun corrupted it.
static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false; // First mismatching word proves corruption.
        }
    }
    return true;
}
    3268 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // Locks `mutex` only when useMutex is true; otherwise the guard is a
    // no-op. This lets callers make synchronization optional without
    // branching at every call site.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex; // Null when synchronization was disabled.
};
    3294 
    3295 #if VMA_DEBUG_GLOBAL_MUTEX
    3296  static VMA_MUTEX gDebugGlobalMutex;
    3297  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3298 #else
    3299  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3300 #endif
    3301 
    3302 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3303 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3304 
    3305 /*
    3306 Performs binary search and returns iterator to first element that is greater or
    3307 equal to (key), according to comparison (cmp).
    3308 
    3309 Cmp should return true if first argument is less than second argument.
    3310 
    3311 Returned value is the found element, if present in the collection or place where
    3312 new element with value (key) should be inserted.
    3313 */
    3314 template <typename CmpLess, typename IterT, typename KeyT>
    3315 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3316 {
    3317  size_t down = 0, up = (end - beg);
    3318  while(down < up)
    3319  {
    3320  const size_t mid = (down + up) / 2;
    3321  if(cmp(*(beg+mid), key))
    3322  {
    3323  down = mid + 1;
    3324  }
    3325  else
    3326  {
    3327  up = mid;
    3328  }
    3329  }
    3330  return beg + down;
    3331 }
    3332 
    3334 // Memory allocation
    3335 
// Allocates `size` bytes aligned to `alignment`, routing through the
// user-provided VkAllocationCallbacks when available, otherwise through the
// system aligned allocator.
static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        return (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
}
    3352 
// Frees memory obtained from VmaMalloc, using the matching path: user
// callbacks if provided, otherwise the system aligned free.
static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}
    3365 
// Allocates raw, uninitialized storage for a single object of type T.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3371 
// Allocates raw, uninitialized storage for an array of `count` objects of T.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3377 
    3378 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3379 
    3380 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3381 
    3382 template<typename T>
    3383 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3384 {
    3385  ptr->~T();
    3386  VmaFree(pAllocationCallbacks, ptr);
    3387 }
    3388 
    3389 template<typename T>
    3390 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3391 {
    3392  if(ptr != VMA_NULL)
    3393  {
    3394  for(size_t i = count; i--; )
    3395  {
    3396  ptr[i].~T();
    3397  }
    3398  VmaFree(pAllocationCallbacks, ptr);
    3399  }
    3400 }
    3401 
// STL-compatible allocator.
// Minimal std::allocator-style adapter that routes all allocations through
// VkAllocationCallbacks (see VmaMalloc / VmaFree above).
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by allocator-aware containers.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3429 
    3430 #if VMA_USE_STL_VECTOR
    3431 
    3432 #define VmaVector std::vector
    3433 
// Inserts `item` at position `index` (std::vector flavor of the helper).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
    3439 
// Removes the element at position `index` (std::vector flavor of the helper).
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
    3445 
    3446 #else // #if VMA_USE_STL_VECTOR
    3447 
    3448 /* Class with interface compatible with subset of std::vector.
    3449 T must be POD because constructors and destructors are not called and memcpy is
    3450 used for these objects. */
    3451 template<typename T, typename AllocatorT>
    3452 class VmaVector
    3453 {
    3454 public:
    3455  typedef T value_type;
    3456 
    3457  VmaVector(const AllocatorT& allocator) :
    3458  m_Allocator(allocator),
    3459  m_pArray(VMA_NULL),
    3460  m_Count(0),
    3461  m_Capacity(0)
    3462  {
    3463  }
    3464 
    3465  VmaVector(size_t count, const AllocatorT& allocator) :
    3466  m_Allocator(allocator),
    3467  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3468  m_Count(count),
    3469  m_Capacity(count)
    3470  {
    3471  }
    3472 
    3473  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3474  m_Allocator(src.m_Allocator),
    3475  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3476  m_Count(src.m_Count),
    3477  m_Capacity(src.m_Count)
    3478  {
    3479  if(m_Count != 0)
    3480  {
    3481  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3482  }
    3483  }
    3484 
    3485  ~VmaVector()
    3486  {
    3487  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3488  }
    3489 
    3490  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3491  {
    3492  if(&rhs != this)
    3493  {
    3494  resize(rhs.m_Count);
    3495  if(m_Count != 0)
    3496  {
    3497  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3498  }
    3499  }
    3500  return *this;
    3501  }
    3502 
    3503  bool empty() const { return m_Count == 0; }
    3504  size_t size() const { return m_Count; }
    3505  T* data() { return m_pArray; }
    3506  const T* data() const { return m_pArray; }
    3507 
    3508  T& operator[](size_t index)
    3509  {
    3510  VMA_HEAVY_ASSERT(index < m_Count);
    3511  return m_pArray[index];
    3512  }
    3513  const T& operator[](size_t index) const
    3514  {
    3515  VMA_HEAVY_ASSERT(index < m_Count);
    3516  return m_pArray[index];
    3517  }
    3518 
    3519  T& front()
    3520  {
    3521  VMA_HEAVY_ASSERT(m_Count > 0);
    3522  return m_pArray[0];
    3523  }
    3524  const T& front() const
    3525  {
    3526  VMA_HEAVY_ASSERT(m_Count > 0);
    3527  return m_pArray[0];
    3528  }
    3529  T& back()
    3530  {
    3531  VMA_HEAVY_ASSERT(m_Count > 0);
    3532  return m_pArray[m_Count - 1];
    3533  }
    3534  const T& back() const
    3535  {
    3536  VMA_HEAVY_ASSERT(m_Count > 0);
    3537  return m_pArray[m_Count - 1];
    3538  }
    3539 
    3540  void reserve(size_t newCapacity, bool freeMemory = false)
    3541  {
    3542  newCapacity = VMA_MAX(newCapacity, m_Count);
    3543 
    3544  if((newCapacity < m_Capacity) && !freeMemory)
    3545  {
    3546  newCapacity = m_Capacity;
    3547  }
    3548 
    3549  if(newCapacity != m_Capacity)
    3550  {
    3551  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3552  if(m_Count != 0)
    3553  {
    3554  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3555  }
    3556  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3557  m_Capacity = newCapacity;
    3558  m_pArray = newArray;
    3559  }
    3560  }
    3561 
    3562  void resize(size_t newCount, bool freeMemory = false)
    3563  {
    3564  size_t newCapacity = m_Capacity;
    3565  if(newCount > m_Capacity)
    3566  {
    3567  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3568  }
    3569  else if(freeMemory)
    3570  {
    3571  newCapacity = newCount;
    3572  }
    3573 
    3574  if(newCapacity != m_Capacity)
    3575  {
    3576  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3577  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3578  if(elementsToCopy != 0)
    3579  {
    3580  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3581  }
    3582  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3583  m_Capacity = newCapacity;
    3584  m_pArray = newArray;
    3585  }
    3586 
    3587  m_Count = newCount;
    3588  }
    3589 
    3590  void clear(bool freeMemory = false)
    3591  {
    3592  resize(0, freeMemory);
    3593  }
    3594 
    3595  void insert(size_t index, const T& src)
    3596  {
    3597  VMA_HEAVY_ASSERT(index <= m_Count);
    3598  const size_t oldCount = size();
    3599  resize(oldCount + 1);
    3600  if(index < oldCount)
    3601  {
    3602  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3603  }
    3604  m_pArray[index] = src;
    3605  }
    3606 
    3607  void remove(size_t index)
    3608  {
    3609  VMA_HEAVY_ASSERT(index < m_Count);
    3610  const size_t oldCount = size();
    3611  if(index < oldCount - 1)
    3612  {
    3613  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3614  }
    3615  resize(oldCount - 1);
    3616  }
    3617 
    3618  void push_back(const T& src)
    3619  {
    3620  const size_t newIndex = size();
    3621  resize(newIndex + 1);
    3622  m_pArray[newIndex] = src;
    3623  }
    3624 
    3625  void pop_back()
    3626  {
    3627  VMA_HEAVY_ASSERT(m_Count > 0);
    3628  resize(size() - 1);
    3629  }
    3630 
    3631  void push_front(const T& src)
    3632  {
    3633  insert(0, src);
    3634  }
    3635 
    3636  void pop_front()
    3637  {
    3638  VMA_HEAVY_ASSERT(m_Count > 0);
    3639  remove(0);
    3640  }
    3641 
    3642  typedef T* iterator;
    3643 
    3644  iterator begin() { return m_pArray; }
    3645  iterator end() { return m_pArray + m_Count; }
    3646 
    3647 private:
    3648  AllocatorT m_Allocator;
    3649  T* m_pArray;
    3650  size_t m_Count;
    3651  size_t m_Capacity;
    3652 };
    3653 
// Inserts `item` at position `index` (VmaVector flavor of the helper).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3659 
// Removes the element at position `index` (VmaVector flavor of the helper).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3665 
    3666 #endif // #if VMA_USE_STL_VECTOR
    3667 
// Inserts `value` into a vector kept sorted according to CmpLess, at the
// position found by binary search. Returns the index it was inserted at.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
    3679 
// Removes the first element equivalent to `value` (neither compares less
// than the other under CmpLess) from a sorted vector.
// Returns true if an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    // Equivalence check: found element is neither less nor greater.
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
    3697 
// Binary-searches the sorted range [beg, end) for an element equivalent to
// `value` under CmpLess. Returns an iterator to it, or `end` if absent.
template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}
    3711 
    3713 // class VmaPoolAllocator
    3714 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();       // Frees all item blocks at once.
    T* Alloc();         // Returns a slot for one T; T's constructor is not invoked here.
    void Free(T* ptr);  // Returns a slot to its block's free list.

private:
    // Each slot is either a live T or, while free, a link in an intrusive
    // free list: the index of the next free slot within the same block.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block has no free slot.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3750 
// Stores the callbacks and block size; no blocks are allocated until the
// first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3759 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Release all item blocks back to the allocation callbacks.
    Clear();
}
    3765 
    3766 template<typename T>
    3767 void VmaPoolAllocator<T>::Clear()
    3768 {
    3769  for(size_t i = m_ItemBlocks.size(); i--; )
    3770  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3771  m_ItemBlocks.clear();
    3772 }
    3773 
    3774 template<typename T>
    3775 T* VmaPoolAllocator<T>::Alloc()
    3776 {
    3777  for(size_t i = m_ItemBlocks.size(); i--; )
    3778  {
    3779  ItemBlock& block = m_ItemBlocks[i];
    3780  // This block has some free items: Use first one.
    3781  if(block.FirstFreeIndex != UINT32_MAX)
    3782  {
    3783  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3784  block.FirstFreeIndex = pItem->NextFreeIndex;
    3785  return &pItem->Value;
    3786  }
    3787  }
    3788 
    3789  // No block has free item: Create new one and use it.
    3790  ItemBlock& newBlock = CreateNewBlock();
    3791  Item* const pItem = &newBlock.pItems[0];
    3792  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3793  return &pItem->Value;
    3794 }
    3795 
// Returns ptr to the free list of the block that owns it.
// ptr must have been obtained from Alloc() of this allocator; otherwise asserts.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union.
        // memcpy of the pointer value (instead of reinterpret_cast) converts
        // T* back to Item* without a type-punning cast; Value is the first
        // (and union) member, so the addresses coincide.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto the head of this block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    3819 
    3820 template<typename T>
    3821 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3822 {
    3823  ItemBlock newBlock = {
    3824  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3825 
    3826  m_ItemBlocks.push_back(newBlock);
    3827 
    3828  // Setup singly-linked list of all free items in this block.
    3829  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3830  newBlock.pItems[i].NextFreeIndex = i + 1;
    3831  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3832  return m_ItemBlocks.back();
    3833 }
    3834 
    3836 // class VmaRawList, VmaList
    3837 
    3838 #if VMA_USE_STL_LIST
    3839 
    3840 #define VmaList std::list
    3841 
    3842 #else // #if VMA_USE_STL_LIST
    3843 
// Node of VmaRawList: intrusive prev/next links plus the stored value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    3851 
    3852 // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Frees all items. Existing ItemType* into this list become invalid.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // Front/Back are meaningful only on a non-empty list.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push* without a value leave Item::Value unassigned.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    // Nodes are pool-allocated, 128 per block, to avoid per-node heap traffic.
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    3896 
// Constructs an empty list; nodes will come from the embedded pool allocator.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3906 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's destructor frees whole blocks at once.
}
    3913 
    3914 template<typename T>
    3915 void VmaRawList<T>::Clear()
    3916 {
    3917  if(IsEmpty() == false)
    3918  {
    3919  ItemType* pItem = m_pBack;
    3920  while(pItem != VMA_NULL)
    3921  {
    3922  ItemType* const pPrevItem = pItem->pPrev;
    3923  m_ItemAllocator.Free(pItem);
    3924  pItem = pPrevItem;
    3925  }
    3926  m_pFront = VMA_NULL;
    3927  m_pBack = VMA_NULL;
    3928  m_Count = 0;
    3929  }
    3930 }
    3931 
    3932 template<typename T>
    3933 VmaListItem<T>* VmaRawList<T>::PushBack()
    3934 {
    3935  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3936  pNewItem->pNext = VMA_NULL;
    3937  if(IsEmpty())
    3938  {
    3939  pNewItem->pPrev = VMA_NULL;
    3940  m_pFront = pNewItem;
    3941  m_pBack = pNewItem;
    3942  m_Count = 1;
    3943  }
    3944  else
    3945  {
    3946  pNewItem->pPrev = m_pBack;
    3947  m_pBack->pNext = pNewItem;
    3948  m_pBack = pNewItem;
    3949  ++m_Count;
    3950  }
    3951  return pNewItem;
    3952 }
    3953 
    3954 template<typename T>
    3955 VmaListItem<T>* VmaRawList<T>::PushFront()
    3956 {
    3957  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3958  pNewItem->pPrev = VMA_NULL;
    3959  if(IsEmpty())
    3960  {
    3961  pNewItem->pNext = VMA_NULL;
    3962  m_pFront = pNewItem;
    3963  m_pBack = pNewItem;
    3964  m_Count = 1;
    3965  }
    3966  else
    3967  {
    3968  pNewItem->pNext = m_pFront;
    3969  m_pFront->pPrev = pNewItem;
    3970  m_pFront = pNewItem;
    3971  ++m_Count;
    3972  }
    3973  return pNewItem;
    3974 }
    3975 
// Appends a new item at the end and copy-assigns `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    3983 
// Prepends a new item at the beginning and copy-assigns `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    3991 
    3992 template<typename T>
    3993 void VmaRawList<T>::PopBack()
    3994 {
    3995  VMA_HEAVY_ASSERT(m_Count > 0);
    3996  ItemType* const pBackItem = m_pBack;
    3997  ItemType* const pPrevItem = pBackItem->pPrev;
    3998  if(pPrevItem != VMA_NULL)
    3999  {
    4000  pPrevItem->pNext = VMA_NULL;
    4001  }
    4002  m_pBack = pPrevItem;
    4003  m_ItemAllocator.Free(pBackItem);
    4004  --m_Count;
    4005 }
    4006 
    4007 template<typename T>
    4008 void VmaRawList<T>::PopFront()
    4009 {
    4010  VMA_HEAVY_ASSERT(m_Count > 0);
    4011  ItemType* const pFrontItem = m_pFront;
    4012  ItemType* const pNextItem = pFrontItem->pNext;
    4013  if(pNextItem != VMA_NULL)
    4014  {
    4015  pNextItem->pPrev = VMA_NULL;
    4016  }
    4017  m_pFront = pNextItem;
    4018  m_ItemAllocator.Free(pFrontItem);
    4019  --m_Count;
    4020 }
    4021 
// Unlinks pItem from the list and returns its node to the item allocator.
// pItem must be a live node of this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix the predecessor's forward link, or the head pointer if pItem is first.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix the successor's backward link, or the tail pointer if pItem is last.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4051 
    4052 template<typename T>
    4053 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4054 {
    4055  if(pItem != VMA_NULL)
    4056  {
    4057  ItemType* const prevItem = pItem->pPrev;
    4058  ItemType* const newItem = m_ItemAllocator.Alloc();
    4059  newItem->pPrev = prevItem;
    4060  newItem->pNext = pItem;
    4061  pItem->pPrev = newItem;
    4062  if(prevItem != VMA_NULL)
    4063  {
    4064  prevItem->pNext = newItem;
    4065  }
    4066  else
    4067  {
    4068  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4069  m_pFront = newItem;
    4070  }
    4071  ++m_Count;
    4072  return newItem;
    4073  }
    4074  else
    4075  return PushBack();
    4076 }
    4077 
    4078 template<typename T>
    4079 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4080 {
    4081  if(pItem != VMA_NULL)
    4082  {
    4083  ItemType* const nextItem = pItem->pNext;
    4084  ItemType* const newItem = m_ItemAllocator.Alloc();
    4085  newItem->pNext = nextItem;
    4086  newItem->pPrev = pItem;
    4087  pItem->pNext = newItem;
    4088  if(nextItem != VMA_NULL)
    4089  {
    4090  nextItem->pPrev = newItem;
    4091  }
    4092  else
    4093  {
    4094  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4095  m_pBack = newItem;
    4096  }
    4097  ++m_Count;
    4098  return newItem;
    4099  }
    4100  else
    4101  return PushFront();
    4102 }
    4103 
// Inserts a new item before pItem (or at the end if pItem is null)
// and copy-assigns `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    4111 
// Inserts a new item after pItem (or at the beginning if pItem is null)
// and copy-assigns `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4119 
// Wrapper over VmaRawList providing a subset of the std::list interface:
// bidirectional iterators, begin/end, push_back, insert, erase, clear.
// The end() position is represented by a null item pointer.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): step to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            // Comparing iterators from different lists is invalid.
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem; // Null represents the end() position.

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Implicit conversion from mutable iterator.
        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): step to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem; // Null represents the cend() position.

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4304 
    4305 #endif // #if VMA_USE_STL_LIST
    4306 
    4308 // class VmaMap
    4309 
    4310 // Unused in this version.
    4311 #if 0
    4312 
    4313 #if VMA_USE_STL_UNORDERED_MAP
    4314 
    4315 #define VmaPair std::pair
    4316 
    4317 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4318  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4319 
    4320 #else // #if VMA_USE_STL_UNORDERED_MAP
    4321 
    4322 template<typename T1, typename T2>
    4323 struct VmaPair
    4324 {
    4325  T1 first;
    4326  T2 second;
    4327 
    4328  VmaPair() : first(), second() { }
    4329  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4330 };
    4331 
    4332 /* Class compatible with subset of interface of std::unordered_map.
    4333 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4334 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying sorted vector;
    // they are invalidated by insert/erase.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts pair at its sorted-by-key position.
    void insert(const PairType& pair);
    // Returns iterator to the pair with given key, or end() if absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted by key to allow binary search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4354 
    4355 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4356 
// Orders VmaPairs by their first member; the heterogeneous overload allows
// binary search by key alone without constructing a full pair.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4369 
    4370 template<typename KeyT, typename ValueT>
    4371 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4372 {
    4373  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4374  m_Vector.data(),
    4375  m_Vector.data() + m_Vector.size(),
    4376  pair,
    4377  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4378  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4379 }
    4380 
    4381 template<typename KeyT, typename ValueT>
    4382 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4383 {
    4384  PairType* it = VmaBinaryFindFirstNotLess(
    4385  m_Vector.data(),
    4386  m_Vector.data() + m_Vector.size(),
    4387  key,
    4388  VmaPairFirstLess<KeyT, ValueT>());
    4389  if((it != m_Vector.end()) && (it->first == key))
    4390  {
    4391  return it;
    4392  }
    4393  else
    4394  {
    4395  return m_Vector.end();
    4396  }
    4397 }
    4398 
// Removes the element at `it`, shifting subsequent elements down.
// Invalidates all iterators at or after `it`.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4404 
    4405 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4406 
    4407 #endif // #if 0
    4408 
    4410 
    4411 class VmaDeviceMemoryBlock;
    4412 
    4413 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4414 
// Internal representation of a single allocation. It is either a
// suballocation within a VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or a
// dedicated VkDeviceMemory of its own (ALLOCATION_TYPE_DEDICATED); the two
// variants share storage in the union below, discriminated by m_Type.
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Starts in ALLOCATION_TYPE_NONE; one of the Init* methods below must be
    // called exactly once to give the object its final type.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this object as a suballocation of an existing memory block.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this object as an already-lost block allocation
    // (no backing block; m_LastUseFrameIndex must already be VMA_FRAME_INDEX_LOST).
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves a block allocation to another block/offset (used by defragmentation).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be set only once (asserted).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is determined by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4631 
    4632 /*
    4633 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4634 allocated memory block or free.
    4635 */
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    VmaAllocation hAllocation; // Null for a FREE suballocation.
    VmaSuballocationType type;
};
    4643 
    4644 // Comparator for offsets.
// Orders suballocations by ascending offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Orders suballocations by descending offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    4659 
    4660 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    4661 
    4662 // Cost of one additional allocation lost, as equivalent in bytes.
    4663 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4664 
    4665 /*
    4666 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4667 
    4668 If canMakeOtherLost was false:
    4669 - item points to a FREE suballocation.
    4670 - itemsToMakeLostCount is 0.
    4671 
    4672 If canMakeOtherLost was true:
    4673 - item points to first of sequence of suballocations, which are either FREE,
    4674  or point to VmaAllocations that can become lost.
    4675 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4676  the requested allocation to succeed.
    4677 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item; // See struct-level comment for meaning depending on canMakeOtherLost.
    size_t itemsToMakeLostCount;
    void* customData; // Opaque data for the metadata implementation that produced this request.

    // Cost metric used to pick the cheapest request: bytes of live allocations
    // sacrificed, plus a fixed penalty per allocation made lost.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4692 
    4693 /*
    4694 Data structure used for bookkeeping of allocations and unused ranges of memory
    4695 in a single VkDeviceMemory block.
    4696 */
class VmaBlockMetadata
{
public:
    // hAllocator is stored only to obtain CPU-side allocation callbacks
    // (see GetAllocationCallbacks() below); it does not own the block.
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Must be called once after construction, before any other method.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Marks the allocations selected in pAllocationRequest as lost so the
    // request can proceed. Returns false if that is no longer possible.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Returns the number of allocations marked as lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers shared by derived classes for emitting the common parts of the
    // detailed JSON map.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    // Total size of the memory block, set once in Init().
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4778 
// Checks `cond`; on failure triggers VMA_ASSERT with a descriptive message and
// makes the enclosing function return false. Intended for use inside the
// Validate() implementations of VmaBlockMetadata subclasses.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4783 
// Default metadata implementation: keeps suballocations in a doubly linked
// list sorted by offset, plus a by-size index of large-enough free ranges.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Total items minus free items = used allocations.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    // Number of free suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Cached sum of sizes of all free suballocations.
    VkDeviceSize m_SumFreeSize;
    // All suballocations (used and free), ordered by offset.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4874 
    4875 /*
    4876 Allocations and their references in internal data structure look like this:
    4877 
    4878 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4879 
    4880  0 +-------+
    4881  | |
    4882  | |
    4883  | |
    4884  +-------+
    4885  | Alloc | 1st[m_1stNullItemsBeginCount]
    4886  +-------+
    4887  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4888  +-------+
    4889  | ... |
    4890  +-------+
    4891  | Alloc | 1st[1st.size() - 1]
    4892  +-------+
    4893  | |
    4894  | |
    4895  | |
    4896 GetSize() +-------+
    4897 
    4898 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4899 
    4900  0 +-------+
    4901  | Alloc | 2nd[0]
    4902  +-------+
    4903  | Alloc | 2nd[1]
    4904  +-------+
    4905  | ... |
    4906  +-------+
    4907  | Alloc | 2nd[2nd.size() - 1]
    4908  +-------+
    4909  | |
    4910  | |
    4911  | |
    4912  +-------+
    4913  | Alloc | 1st[m_1stNullItemsBeginCount]
    4914  +-------+
    4915  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4916  +-------+
    4917  | ... |
    4918  +-------+
    4919  | Alloc | 1st[1st.size() - 1]
    4920  +-------+
    4921  | |
    4922 GetSize() +-------+
    4923 
    4924 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4925 
    4926  0 +-------+
    4927  | |
    4928  | |
    4929  | |
    4930  +-------+
    4931  | Alloc | 1st[m_1stNullItemsBeginCount]
    4932  +-------+
    4933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4934  +-------+
    4935  | ... |
    4936  +-------+
    4937  | Alloc | 1st[1st.size() - 1]
    4938  +-------+
    4939  | |
    4940  | |
    4941  | |
    4942  +-------+
    4943  | Alloc | 2nd[2nd.size() - 1]
    4944  +-------+
    4945  | ... |
    4946  +-------+
    4947  | Alloc | 2nd[1]
    4948  +-------+
    4949  | Alloc | 2nd[0]
    4950 GetSize() +-------+
    4951 
    4952 */
// Linear allocator metadata: supports stack, ring-buffer and double-stack
// behavior using the two suballocation vectors described in the diagram above.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    // Selects which of the two vectors above currently plays the role of "1st".
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5051 
    5052 /*
    5053 - GetSize() is the original size of allocated memory block.
    5054 - m_UsableSize is this size aligned down to a power of two.
    5055  All allocations and calculations happen relative to m_UsableSize.
    5056 - GetUnusableSize() is the difference between them.
 It is reported as a separate, unused range, not available for allocations.
    5058 
    5059 Node at level 0 has size = m_UsableSize.
    5060 Each next level contains nodes with size 2 times smaller than current level.
    5061 m_LevelCount is the maximum number of levels to use in the current object.
    5062 */
// Buddy allocator metadata: a binary tree of nodes whose sizes halve at each
// level, with per-level free lists. See the explanatory comment above.
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // The unusable tail (size not covered by the power-of-two m_UsableSize)
    // is counted as free here, because it is reported as an unused range.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    // Nodes are never split below this size.
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled while recursively validating the tree, compared
    // against the cached counters at the end of Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        // Payload depends on `type`; only the matching union member is valid.
        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                // Right child is implicitly leftChild->buddy.
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly linked free list per level; nodes link through free.prev/next.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5199 
    5200 /*
    5201 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5202 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5203 
    5204 Thread-safety: This class must be externally synchronized.
    5205 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block; concrete type chosen
    // in Init() based on `algorithm`.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    // `count` is added to the internal map reference count; Unmap must be
    // called with a matching count.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5268 
    5269 struct VmaPointerLess
    5270 {
    5271  bool operator()(const void* lhs, const void* rhs) const
    5272  {
    5273  return lhs < rhs;
    5274  }
    5275 };
    5276 
    5277 class VmaDefragmentator;
    5278 
    5279 /*
    5280 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5281 Vulkan memory type.
    5282 
    5283 Synchronized internally with a mutex.
    5284 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount blocks so they exist up front.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates m_pDefragmentator if it doesn't exist yet.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    // NOTE(review): the comment above appears to describe m_HasEmptyBlock,
    // not m_pDefragmentator — confirm and move it next to that member.
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5397 
// Internal representation of a custom pool (VmaPool handle target):
// a thin wrapper over one VmaBlockVector plus a numeric id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id can be assigned only once (asserts it is still 0).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5420 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation. Use: AddAllocation() for each candidate, then Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals reported via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, with an optional
    // out-flag set when the allocation actually gets moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations by size, descending (largest first).
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working data built during Defragment().
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // Block has non-movable allocations iff not all of its allocations
        // were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): name contains a typo ("Descecnding") but renaming
        // would break callers elsewhere in the file.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparators for binary-searching BlockInfo* by the
    // address of the underlying VmaDeviceMemoryBlock.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves, bounded by the given budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers hAlloc as movable; *pChanged (optional) is set if it moves.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5550 
    5551 #if VMA_RECORDING_ENABLED
    5552 
/*
Records a trace of VMA API calls to a text file so the sequence can be
replayed/analyzed later. Compiled in only when VMA_RECORDING_ENABLED is
nonzero. One Record* method exists per public VMA entry point; each takes the
current frame index so the replay can reproduce frame boundaries.
*/
class VmaRecorder
{
public:
    VmaRecorder();
    // Opens the output file described by settings.
    // useMutex: serialize writes when the allocator may be used from multiple threads.
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the recording environment (device properties,
    // memory properties, enabled extensions).
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Per-call metadata written alongside every recorded line.
    struct CallParams
    {
        uint32_t threadId;
        double time; // NOTE(review): presumably seconds since Init(), derived from m_Freq/m_StartCounter — confirm in GetBasicParams().
    };

    // Formats an allocation's pUserData either as a string (when the
    // allocation uses string user data) or as a hexadecimal pointer.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17]; // 16 hex digits + terminating '\0' when printing a pointer.
        const char* m_Str; // Points either into m_PtrStr or at the user's string.
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex; // Guards m_File when m_UseMutex is true.
    int64_t m_Freq;         // NOTE(review): looks like a performance-counter frequency — confirm against Init().
    int64_t m_StartCounter; // Counter value captured at Init(); timestamps are relative to it.

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5648 
    5649 #endif // #if VMA_RECORDING_ENABLED
    5650 
    5651 // Main allocator object.
// Main allocator object.
// Implementation behind the opaque VmaAllocator handle: owns the default
// block vectors (one per memory type), the registry of dedicated allocations,
// custom pools, and cached device/physical-device properties.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex; // False when the user promised single-threaded use.
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    // Second-phase initialization; may fail (e.g. recorder file cannot open).
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user callbacks if specified, or null to use default allocation.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Effective granularity between buffer and image resources: device limit,
    // possibly raised by the debug override.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent memory must additionally respect nonCoherentAtomSize so
    // flush/invalidate ranges can be rounded safely.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Thin wrappers over vkAllocateMemory/vkFreeMemory that also track heap
    // size limits and invoke user device-memory callbacks.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5848 
    5850 // Memory allocation #2 after VmaAllocator_T definition
    5851 
    5852 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5853 {
    5854  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5855 }
    5856 
    5857 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5858 {
    5859  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5860 }
    5861 
    5862 template<typename T>
    5863 static T* VmaAllocate(VmaAllocator hAllocator)
    5864 {
    5865  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5866 }
    5867 
    5868 template<typename T>
    5869 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5870 {
    5871  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5872 }
    5873 
    5874 template<typename T>
    5875 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5876 {
    5877  if(ptr != VMA_NULL)
    5878  {
    5879  ptr->~T();
    5880  VmaFree(hAllocator, ptr);
    5881  }
    5882 }
    5883 
    5884 template<typename T>
    5885 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5886 {
    5887  if(ptr != VMA_NULL)
    5888  {
    5889  for(size_t i = count; i--; )
    5890  ptr[i].~T();
    5891  VmaFree(hAllocator, ptr);
    5892  }
    5893 }
    5894 
    5896 // VmaStringBuilder
    5897 
    5898 #if VMA_STATS_STRING_ENABLED
    5899 
// Minimal append-only string builder used to produce statistics strings.
// Backed by a VmaVector<char> that allocates through the allocator's CPU
// callbacks. The buffer is NOT null-terminated; use GetLength()/GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);  // Decimal.
    void AddNumber(uint64_t num);  // Decimal.
    void AddPointer(const void* ptr); // Hexadecimal.

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5917 
    5918 void VmaStringBuilder::Add(const char* pStr)
    5919 {
    5920  const size_t strLen = strlen(pStr);
    5921  if(strLen > 0)
    5922  {
    5923  const size_t oldCount = m_Data.size();
    5924  m_Data.resize(oldCount + strLen);
    5925  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5926  }
    5927 }
    5928 
    5929 void VmaStringBuilder::AddNumber(uint32_t num)
    5930 {
    5931  char buf[11];
    5932  VmaUint32ToStr(buf, sizeof(buf), num);
    5933  Add(buf);
    5934 }
    5935 
    5936 void VmaStringBuilder::AddNumber(uint64_t num)
    5937 {
    5938  char buf[21];
    5939  VmaUint64ToStr(buf, sizeof(buf), num);
    5940  Add(buf);
    5941 }
    5942 
    5943 void VmaStringBuilder::AddPointer(const void* ptr)
    5944 {
    5945  char buf[21];
    5946  VmaPtrToStr(buf, sizeof(buf), ptr);
    5947  Add(buf);
    5948 }
    5949 
    5950 #endif // #if VMA_STATS_STRING_ENABLED
    5951 
    5953 // VmaJsonWriter
    5954 
    5955 #if VMA_STATS_STRING_ENABLED
    5956 
// Streaming JSON writer used to produce the detailed statistics string.
// Maintains a stack of open objects/arrays so it can emit separators and
// indentation automatically; asserts (in debug) that the produced document
// is well-formed, e.g. that object keys are strings.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    // sb: destination buffer; must outlive this writer.
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine: suppress newlines/indentation inside this collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted, escaped string value (or object key).
    void WriteString(const char* pStr);
    // Begin/Continue/End allow building one string value from several pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently-open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;  // In objects, even counts mean a key is expected next.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString; // True between BeginString() and EndString().

    // Emits the separator/indent appropriate before a new value.
    void BeginValue(bool isString);
    // oneLess: indent one level shallower (used before closing brackets).
    void WriteIndent(bool oneLess = false);
};
    6005 
// Indentation unit appended once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
    6007 
// Binds the writer to its output buffer; the stack uses the caller's CPU
// allocation callbacks.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6014 
// Debug-checks that the document was fully closed: no string in progress and
// every BeginObject/BeginArray matched by an End call.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6020 
    6021 void VmaJsonWriter::BeginObject(bool singleLine)
    6022 {
    6023  VMA_ASSERT(!m_InsideString);
    6024 
    6025  BeginValue(false);
    6026  m_SB.Add('{');
    6027 
    6028  StackItem item;
    6029  item.type = COLLECTION_TYPE_OBJECT;
    6030  item.valueCount = 0;
    6031  item.singleLineMode = singleLine;
    6032  m_Stack.push_back(item);
    6033 }
    6034 
    6035 void VmaJsonWriter::EndObject()
    6036 {
    6037  VMA_ASSERT(!m_InsideString);
    6038 
    6039  WriteIndent(true);
    6040  m_SB.Add('}');
    6041 
    6042  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6043  m_Stack.pop_back();
    6044 }
    6045 
    6046 void VmaJsonWriter::BeginArray(bool singleLine)
    6047 {
    6048  VMA_ASSERT(!m_InsideString);
    6049 
    6050  BeginValue(false);
    6051  m_SB.Add('[');
    6052 
    6053  StackItem item;
    6054  item.type = COLLECTION_TYPE_ARRAY;
    6055  item.valueCount = 0;
    6056  item.singleLineMode = singleLine;
    6057  m_Stack.push_back(item);
    6058 }
    6059 
    6060 void VmaJsonWriter::EndArray()
    6061 {
    6062  VMA_ASSERT(!m_InsideString);
    6063 
    6064  WriteIndent(true);
    6065  m_SB.Add(']');
    6066 
    6067  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6068  m_Stack.pop_back();
    6069 }
    6070 
// Convenience: writes a complete string value (or object key) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6076 
    6077 void VmaJsonWriter::BeginString(const char* pStr)
    6078 {
    6079  VMA_ASSERT(!m_InsideString);
    6080 
    6081  BeginValue(true);
    6082  m_SB.Add('"');
    6083  m_InsideString = true;
    6084  if(pStr != VMA_NULL && pStr[0] != '\0')
    6085  {
    6086  ContinueString(pStr);
    6087  }
    6088 }
    6089 
    6090 void VmaJsonWriter::ContinueString(const char* pStr)
    6091 {
    6092  VMA_ASSERT(m_InsideString);
    6093 
    6094  const size_t strLen = strlen(pStr);
    6095  for(size_t i = 0; i < strLen; ++i)
    6096  {
    6097  char ch = pStr[i];
    6098  if(ch == '\\')
    6099  {
    6100  m_SB.Add("\\\\");
    6101  }
    6102  else if(ch == '"')
    6103  {
    6104  m_SB.Add("\\\"");
    6105  }
    6106  else if(ch >= 32)
    6107  {
    6108  m_SB.Add(ch);
    6109  }
    6110  else switch(ch)
    6111  {
    6112  case '\b':
    6113  m_SB.Add("\\b");
    6114  break;
    6115  case '\f':
    6116  m_SB.Add("\\f");
    6117  break;
    6118  case '\n':
    6119  m_SB.Add("\\n");
    6120  break;
    6121  case '\r':
    6122  m_SB.Add("\\r");
    6123  break;
    6124  case '\t':
    6125  m_SB.Add("\\t");
    6126  break;
    6127  default:
    6128  VMA_ASSERT(0 && "Character not currently supported.");
    6129  break;
    6130  }
    6131  }
    6132 }
    6133 
// Appends a decimal number to the string currently being built.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6139 
// Appends a decimal number to the string currently being built.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6145 
// Appends a pointer, formatted as hexadecimal, to the string being built.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6151 
    6152 void VmaJsonWriter::EndString(const char* pStr)
    6153 {
    6154  VMA_ASSERT(m_InsideString);
    6155  if(pStr != VMA_NULL && pStr[0] != '\0')
    6156  {
    6157  ContinueString(pStr);
    6158  }
    6159  m_SB.Add('"');
    6160  m_InsideString = false;
    6161 }
    6162 
// Writes a standalone numeric value (not inside a string).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6169 
// Writes a standalone numeric value (not inside a string).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6176 
// Writes a JSON boolean literal.
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}
    6183 
// Writes the JSON null literal.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6190 
    6191 void VmaJsonWriter::BeginValue(bool isString)
    6192 {
    6193  if(!m_Stack.empty())
    6194  {
    6195  StackItem& currItem = m_Stack.back();
    6196  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6197  currItem.valueCount % 2 == 0)
    6198  {
    6199  VMA_ASSERT(isString);
    6200  }
    6201 
    6202  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6203  currItem.valueCount % 2 != 0)
    6204  {
    6205  m_SB.Add(": ");
    6206  }
    6207  else if(currItem.valueCount > 0)
    6208  {
    6209  m_SB.Add(", ");
    6210  WriteIndent();
    6211  }
    6212  else
    6213  {
    6214  WriteIndent();
    6215  }
    6216  ++currItem.valueCount;
    6217  }
    6218 }
    6219 
    6220 void VmaJsonWriter::WriteIndent(bool oneLess)
    6221 {
    6222  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6223  {
    6224  m_SB.AddNewLine();
    6225 
    6226  size_t count = m_Stack.size();
    6227  if(count > 0 && oneLess)
    6228  {
    6229  --count;
    6230  }
    6231  for(size_t i = 0; i < count; ++i)
    6232  {
    6233  m_SB.Add(INDENT);
    6234  }
    6235  }
    6236 }
    6237 
    6238 #endif // #if VMA_STATS_STRING_ENABLED
    6239 
    6241 
    6242 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6243 {
    6244  if(IsUserDataString())
    6245  {
    6246  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6247 
    6248  FreeUserDataString(hAllocator);
    6249 
    6250  if(pUserData != VMA_NULL)
    6251  {
    6252  const char* const newStrSrc = (char*)pUserData;
    6253  const size_t newStrLen = strlen(newStrSrc);
    6254  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6255  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6256  m_pUserData = newStrDst;
    6257  }
    6258  }
    6259  else
    6260  {
    6261  m_pUserData = pUserData;
    6262  }
    6263 }
    6264 
// Re-points a block allocation at a (possibly different) block/offset, e.g.
// during defragmentation. The Unmap-then-Map order below transfers this
// allocation's outstanding mapping references between the blocks' shared
// mapping refcounts and must not be reordered.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount; // Persistent mapping holds one extra reference.
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6286 
    6287 VkDeviceSize VmaAllocation_T::GetOffset() const
    6288 {
    6289  switch(m_Type)
    6290  {
    6291  case ALLOCATION_TYPE_BLOCK:
    6292  return m_BlockAllocation.m_Offset;
    6293  case ALLOCATION_TYPE_DEDICATED:
    6294  return 0;
    6295  default:
    6296  VMA_ASSERT(0);
    6297  return 0;
    6298  }
    6299 }
    6300 
    6301 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6302 {
    6303  switch(m_Type)
    6304  {
    6305  case ALLOCATION_TYPE_BLOCK:
    6306  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6307  case ALLOCATION_TYPE_DEDICATED:
    6308  return m_DedicatedAllocation.m_hMemory;
    6309  default:
    6310  VMA_ASSERT(0);
    6311  return VK_NULL_HANDLE;
    6312  }
    6313 }
    6314 
    6315 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6316 {
    6317  switch(m_Type)
    6318  {
    6319  case ALLOCATION_TYPE_BLOCK:
    6320  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6321  case ALLOCATION_TYPE_DEDICATED:
    6322  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6323  default:
    6324  VMA_ASSERT(0);
    6325  return UINT32_MAX;
    6326  }
    6327 }
    6328 
    6329 void* VmaAllocation_T::GetMappedData() const
    6330 {
    6331  switch(m_Type)
    6332  {
    6333  case ALLOCATION_TYPE_BLOCK:
    6334  if(m_MapCount != 0)
    6335  {
    6336  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6337  VMA_ASSERT(pBlockData != VMA_NULL);
    6338  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6339  }
    6340  else
    6341  {
    6342  return VMA_NULL;
    6343  }
    6344  break;
    6345  case ALLOCATION_TYPE_DEDICATED:
    6346  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6347  return m_DedicatedAllocation.m_pMappedData;
    6348  default:
    6349  VMA_ASSERT(0);
    6350  return VMA_NULL;
    6351  }
    6352 }
    6353 
    6354 bool VmaAllocation_T::CanBecomeLost() const
    6355 {
    6356  switch(m_Type)
    6357  {
    6358  case ALLOCATION_TYPE_BLOCK:
    6359  return m_BlockAllocation.m_CanBecomeLost;
    6360  case ALLOCATION_TYPE_DEDICATED:
    6361  return false;
    6362  default:
    6363  VMA_ASSERT(0);
    6364  return false;
    6365  }
    6366 }
    6367 
// Custom pool this block allocation belongs to (VK_NULL_HANDLE for default
// pools). Only valid for block allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6373 
// Attempts to atomically mark this allocation as lost. Returns true on
// success; returns false if the allocation was used too recently (within
// frameInUseCount frames of currentFrameIndex). Lock-free CAS loop: may race
// with concurrent TouchAllocation() calls updating the last-use frame index.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost — caller should not have asked.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU; cannot be lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            // On CAS failure, localLastUseFrameIndex is reloaded and the loop retries.
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    6405 
    6406 #if VMA_STATS_STRING_ENABLED
    6407 
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum in PrintParameters(); keep the order in sync.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6417 
// Writes this allocation's key/value pairs into an already-open JSON object.
// The caller owns BeginObject()/EndObject(); key order here defines the
// statistics-string format, so do not reorder.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned copy of a user string: print as-is.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer: print as hexadecimal.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // 0 means usage was never recorded for this allocation.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6453 
    6454 #endif
    6455 
    6456 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6457 {
    6458  VMA_ASSERT(IsUserDataString());
    6459  if(m_pUserData != VMA_NULL)
    6460  {
    6461  char* const oldStr = (char*)m_pUserData;
    6462  const size_t oldStrLen = strlen(oldStr);
    6463  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6464  m_pUserData = VMA_NULL;
    6465  }
    6466 }
    6467 
    6468 void VmaAllocation_T::BlockAllocMap()
    6469 {
    6470  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6471 
    6472  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6473  {
    6474  ++m_MapCount;
    6475  }
    6476  else
    6477  {
    6478  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6479  }
    6480 }
    6481 
    6482 void VmaAllocation_T::BlockAllocUnmap()
    6483 {
    6484  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6485 
    6486  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6487  {
    6488  --m_MapCount;
    6489  }
    6490  else
    6491  {
    6492  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6493  }
    6494 }
    6495 
    6496 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6497 {
    6498  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6499 
    6500  if(m_MapCount != 0)
    6501  {
    6502  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6503  {
    6504  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6505  *ppData = m_DedicatedAllocation.m_pMappedData;
    6506  ++m_MapCount;
    6507  return VK_SUCCESS;
    6508  }
    6509  else
    6510  {
    6511  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6512  return VK_ERROR_MEMORY_MAP_FAILED;
    6513  }
    6514  }
    6515  else
    6516  {
    6517  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6518  hAllocator->m_hDevice,
    6519  m_DedicatedAllocation.m_hMemory,
    6520  0, // offset
    6521  VK_WHOLE_SIZE,
    6522  0, // flags
    6523  ppData);
    6524  if(result == VK_SUCCESS)
    6525  {
    6526  m_DedicatedAllocation.m_pMappedData = *ppData;
    6527  m_MapCount = 1;
    6528  }
    6529  return result;
    6530  }
    6531 }
    6532 
    6533 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6534 {
    6535  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6536 
    6537  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6538  {
    6539  --m_MapCount;
    6540  if(m_MapCount == 0)
    6541  {
    6542  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6543  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6544  hAllocator->m_hDevice,
    6545  m_DedicatedAllocation.m_hMemory);
    6546  }
    6547  }
    6548  else
    6549  {
    6550  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6551  }
    6552 }
    6553 
    6554 #if VMA_STATS_STRING_ENABLED
    6555 
// Writes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one sample, since with a single value
// they would all equal that value. Key order defines the output format.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true); // Single-line object.
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true); // Single-line object.
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6603 
    6604 #endif // #if VMA_STATS_STRING_ENABLED
    6605 
// Comparison functor ordering suballocation-list iterators by the size of the
// suballocation they point to. The second overload compares against a raw
// VkDeviceSize, enabling binary search (e.g. VmaBinaryFindFirstNotLess) over
// m_FreeSuballocationsBySize with a size key instead of an iterator.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6621 
    6622 
    6624 // class VmaBlockMetadata
    6625 
// Base-class constructor: size is set later via Init(); only the allocation
// callbacks are captured from the owning allocator.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6631 
    6632 #if VMA_STATS_STRING_ENABLED
    6633 
// Opens the JSON object for one block's detailed map: writes the summary
// fields and begins the "Suballocations" array. Must be paired with
// PrintDetailedMap_End() after the per-suballocation entries are written.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6656 
// Writes one used suballocation as a single-line JSON object: its offset plus
// whatever parameters the allocation itself reports (type, size, user data...).
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true); // true = single-line object

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // The allocation prints its own fields (delegated so all block types agree).
    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6670 
// Writes one free range as a single-line JSON object with offset, the FREE
// type name, and its size.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true); // true = single-line object

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6688 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6694 
    6695 #endif // #if VMA_STATS_STRING_ENABLED
    6696 
    6698 // class VmaBlockMetadata_Generic
    6699 
// Constructs empty metadata; both containers use the allocator's custom
// CPU allocation callbacks. Real state is established in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6708 
// Containers clean up via their own destructors; nothing else to release here.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6712 
// Initializes metadata for a freshly created block: the whole block is one
// free suballocation, which is also registered in the size-sorted index.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // A whole block is always large enough to be registered by size.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem; // iterator to the element just pushed
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6732 
// Full consistency check of this block's metadata, used by VMA_HEAVY_ASSERT.
// Walks the suballocation list recomputing offsets, free counts and free-byte
// totals, then cross-checks them against the cached members and validates the
// size-sorted free list. Returns true on success; VMA_VALIDATE reports and
// fails out on the first violated invariant.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free entries have no allocation handle; used entries must have one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only ranges at least this large are indexed by size.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // The size-sorted index must reference only free entries and be ascending.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6814 
    6815 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6816 {
    6817  if(!m_FreeSuballocationsBySize.empty())
    6818  {
    6819  return m_FreeSuballocationsBySize.back()->size;
    6820  }
    6821  else
    6822  {
    6823  return 0;
    6824  }
    6825 }
    6826 
    6827 bool VmaBlockMetadata_Generic::IsEmpty() const
    6828 {
    6829  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6830 }
    6831 
// Fills outInfo with statistics of this single block: counts, used/unused
// bytes, and min/max sizes of used and free ranges. Min fields start at
// UINT64_MAX and max fields at 0 so that VMA_MIN/VMA_MAX converge correctly;
// averages are not computed here (aggregation happens at a higher level).
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
    6865 
// Accumulates this block's statistics into pool-level totals. All fields are
// additive except unusedRangeSizeMax, which takes the maximum across blocks.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
    6876 
    6877 #if VMA_STATS_STRING_ENABLED
    6878 
// Dumps this block's layout as JSON: a summary header followed by one entry
// per suballocation in address order (free ranges and allocations interleaved).
void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
{
    PrintDetailedMap_Begin(json,
        m_SumFreeSize, // unusedBytes
        m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
        m_FreeCount); // unusedRangeCount

    size_t i = 0;
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem, ++i)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
        }
        else
        {
            PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
        }
    }

    PrintDetailedMap_End(json);
}
    6903 
    6904 #endif // #if VMA_STATS_STRING_ENABLED
    6905 
    6906 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6907  uint32_t currentFrameIndex,
    6908  uint32_t frameInUseCount,
    6909  VkDeviceSize bufferImageGranularity,
    6910  VkDeviceSize allocSize,
    6911  VkDeviceSize allocAlignment,
    6912  bool upperAddress,
    6913  VmaSuballocationType allocType,
    6914  bool canMakeOtherLost,
    6915  uint32_t strategy,
    6916  VmaAllocationRequest* pAllocationRequest)
    6917 {
    6918  VMA_ASSERT(allocSize > 0);
    6919  VMA_ASSERT(!upperAddress);
    6920  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6921  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6922  VMA_HEAVY_ASSERT(Validate());
    6923 
    6924  // There is not enough total free space in this block to fullfill the request: Early return.
    6925  if(canMakeOtherLost == false &&
    6926  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6927  {
    6928  return false;
    6929  }
    6930 
    6931  // New algorithm, efficiently searching freeSuballocationsBySize.
    6932  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6933  if(freeSuballocCount > 0)
    6934  {
    6936  {
    6937  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6938  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6939  m_FreeSuballocationsBySize.data(),
    6940  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6941  allocSize + 2 * VMA_DEBUG_MARGIN,
    6942  VmaSuballocationItemSizeLess());
    6943  size_t index = it - m_FreeSuballocationsBySize.data();
    6944  for(; index < freeSuballocCount; ++index)
    6945  {
    6946  if(CheckAllocation(
    6947  currentFrameIndex,
    6948  frameInUseCount,
    6949  bufferImageGranularity,
    6950  allocSize,
    6951  allocAlignment,
    6952  allocType,
    6953  m_FreeSuballocationsBySize[index],
    6954  false, // canMakeOtherLost
    6955  &pAllocationRequest->offset,
    6956  &pAllocationRequest->itemsToMakeLostCount,
    6957  &pAllocationRequest->sumFreeSize,
    6958  &pAllocationRequest->sumItemSize))
    6959  {
    6960  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6961  return true;
    6962  }
    6963  }
    6964  }
    6965  else // WORST_FIT, FIRST_FIT
    6966  {
    6967  // Search staring from biggest suballocations.
    6968  for(size_t index = freeSuballocCount; index--; )
    6969  {
    6970  if(CheckAllocation(
    6971  currentFrameIndex,
    6972  frameInUseCount,
    6973  bufferImageGranularity,
    6974  allocSize,
    6975  allocAlignment,
    6976  allocType,
    6977  m_FreeSuballocationsBySize[index],
    6978  false, // canMakeOtherLost
    6979  &pAllocationRequest->offset,
    6980  &pAllocationRequest->itemsToMakeLostCount,
    6981  &pAllocationRequest->sumFreeSize,
    6982  &pAllocationRequest->sumItemSize))
    6983  {
    6984  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    6985  return true;
    6986  }
    6987  }
    6988  }
    6989  }
    6990 
    6991  if(canMakeOtherLost)
    6992  {
    6993  // Brute-force algorithm. TODO: Come up with something better.
    6994 
    6995  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    6996  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    6997 
    6998  VmaAllocationRequest tmpAllocRequest = {};
    6999  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7000  suballocIt != m_Suballocations.end();
    7001  ++suballocIt)
    7002  {
    7003  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7004  suballocIt->hAllocation->CanBecomeLost())
    7005  {
    7006  if(CheckAllocation(
    7007  currentFrameIndex,
    7008  frameInUseCount,
    7009  bufferImageGranularity,
    7010  allocSize,
    7011  allocAlignment,
    7012  allocType,
    7013  suballocIt,
    7014  canMakeOtherLost,
    7015  &tmpAllocRequest.offset,
    7016  &tmpAllocRequest.itemsToMakeLostCount,
    7017  &tmpAllocRequest.sumFreeSize,
    7018  &tmpAllocRequest.sumItemSize))
    7019  {
    7020  tmpAllocRequest.item = suballocIt;
    7021 
    7022  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7024  {
    7025  *pAllocationRequest = tmpAllocRequest;
    7026  }
    7027  }
    7028  }
    7029  }
    7030 
    7031  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7032  {
    7033  return true;
    7034  }
    7035  }
    7036 
    7037  return false;
    7038 }
    7039 
// Marks as lost the allocations that CreateAllocationRequest() counted in
// itemsToMakeLostCount, freeing their suballocations. Returns false if any of
// them can no longer be made lost (e.g. used too recently), in which case the
// request must be abandoned.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free entry to reach the next allocation to sacrifice.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge neighbors; it returns the iterator of
            // the resulting free range, which becomes the new request anchor.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7071 
// Walks all suballocations and makes lost every allocation that is eligible
// (CanBecomeLost and old enough per frameInUseCount). Returns how many
// allocations were actually lost.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation merges adjacent free ranges and returns the
            // iterator of the merged free entry; iteration continues from it.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    7089 
// Scans the mapped block memory (pBlockData) for overwritten guard margins.
// Each used suballocation is expected to have a magic value written
// VMA_DEBUG_MARGIN bytes before its offset and right after its end; a
// mismatch indicates an out-of-bounds write by the application.
VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
{
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            // Guard bytes immediately preceding the allocation.
            if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            // Guard bytes immediately following the allocation.
            if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
        }
    }

    return VK_SUCCESS;
}
    7113 
// Commits a previously validated allocation request: converts the targeted
// free suballocation into a used one and splits off any leftover space before
// and after the allocation as new free suballocations.
// Statement order matters: the item is unregistered from the size index
// BEFORE its fields are mutated, and the end padding is inserted before the
// begin padding so that request.item remains a valid insertion anchor.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress); // Not supported by this metadata type.
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: one free range consumed, up to two new padding ranges created.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7179 
    7180 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7181 {
    7182  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7183  suballocItem != m_Suballocations.end();
    7184  ++suballocItem)
    7185  {
    7186  VmaSuballocation& suballoc = *suballocItem;
    7187  if(suballoc.hAllocation == allocation)
    7188  {
    7189  FreeSuballocation(suballocItem);
    7190  VMA_HEAVY_ASSERT(Validate());
    7191  return;
    7192  }
    7193  }
    7194  VMA_ASSERT(0 && "Not found!");
    7195 }
    7196 
    7197 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7198 {
    7199  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7200  suballocItem != m_Suballocations.end();
    7201  ++suballocItem)
    7202  {
    7203  VmaSuballocation& suballoc = *suballocItem;
    7204  if(suballoc.offset == offset)
    7205  {
    7206  FreeSuballocation(suballocItem);
    7207  return;
    7208  }
    7209  }
    7210  VMA_ASSERT(0 && "Not found!");
    7211 }
    7212 
    7213 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7214 {
    7215  VkDeviceSize lastSize = 0;
    7216  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7217  {
    7218  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7219 
    7220  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7221  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7222  VMA_VALIDATE(it->size >= lastSize);
    7223  lastSize = it->size;
    7224  }
    7225  return true;
    7226 }
    7227 
// Checks whether an allocation of allocSize/allocAlignment can be placed
// starting at suballocItem, honoring VMA_DEBUG_MARGIN and the device's
// bufferImageGranularity. On success writes the final offset to *pOffset and
// fills the bookkeeping outputs; returns false if placement is impossible.
//
// Two paths:
//  - canMakeOtherLost == true: the allocation may span multiple consecutive
//    suballocations, consuming free ranges and "lost-able" allocations;
//    *itemsToMakeLostCount / *pSumItemSize report the cost of doing so.
//  - canMakeOtherLost == false: suballocItem must be a single free range
//    large enough by itself.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting item may be free, or an allocation old enough to sacrifice.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Re-align to granularity so a linear and a non-linear resource
                // never share a granularity page.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7501 
    7502 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7503 {
    7504  VMA_ASSERT(item != m_Suballocations.end());
    7505  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7506 
    7507  VmaSuballocationList::iterator nextItem = item;
    7508  ++nextItem;
    7509  VMA_ASSERT(nextItem != m_Suballocations.end());
    7510  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7511 
    7512  item->size += nextItem->size;
    7513  --m_FreeCount;
    7514  m_Suballocations.erase(nextItem);
    7515 }
    7516 
// Marks the given suballocation as free, merges it with adjacent free
// neighbors if present, registers the resulting free range in
// m_FreeSuballocationsBySize, and returns an iterator to it.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // nextItem must be unregistered from the by-size vector BEFORE
        // MergeFreeWithNext erases it from m_Suballocations, or the vector
        // would hold a dangling iterator.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem changes size when it absorbs suballocItem, so it has to be
        // unregistered first and re-registered afterwards to keep the by-size
        // vector sorted.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7568 
    7569 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7570 {
    7571  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7572  VMA_ASSERT(item->size > 0);
    7573 
    7574  // You may want to enable this validation at the beginning or at the end of
    7575  // this function, depending on what do you want to check.
    7576  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7577 
    7578  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7579  {
    7580  if(m_FreeSuballocationsBySize.empty())
    7581  {
    7582  m_FreeSuballocationsBySize.push_back(item);
    7583  }
    7584  else
    7585  {
    7586  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7587  }
    7588  }
    7589 
    7590  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7591 }
    7592 
    7593 
    7594 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    7595 {
    7596  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7597  VMA_ASSERT(item->size > 0);
    7598 
    7599  // You may want to enable this validation at the beginning or at the end of
    7600  // this function, depending on what do you want to check.
    7601  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7602 
    7603  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7604  {
    7605  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7606  m_FreeSuballocationsBySize.data(),
    7607  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    7608  item,
    7609  VmaSuballocationItemSizeLess());
    7610  for(size_t index = it - m_FreeSuballocationsBySize.data();
    7611  index < m_FreeSuballocationsBySize.size();
    7612  ++index)
    7613  {
    7614  if(m_FreeSuballocationsBySize[index] == item)
    7615  {
    7616  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    7617  return;
    7618  }
    7619  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    7620  }
    7621  VMA_ASSERT(0 && "Not found.");
    7622  }
    7623 
    7624  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7625 }
    7626 
    7628 // class VmaBlockMetadata_Linear
    7629 
// Constructs empty linear metadata: both suballocation vectors empty, the
// first vector is index 0, and the second vector starts in EMPTY mode.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7642 
// Members clean up via their own destructors; nothing to release manually.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7646 
// Initializes metadata for a block of the given size; the whole block starts
// out free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7652 
// Heavy consistency check of the linear allocator's internal state.
// Returns true if all invariants hold; each VMA_VALIDATE fails the check
// (and the function) when its condition is false.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector non-empty exactly when its mode says so; in ring-buffer mode
    // a non-empty 2nd vector requires a non-empty 1st vector.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Running lower bound for the next item's offset; items must be in
    // strictly increasing address order with the debug margin between them.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode the 2nd vector occupies addresses BELOW the 1st,
        // so walk it first.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // Allocation object must agree with the metadata entry.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading entries of the 1st vector up to m_1stNullItemsBeginCount must
    // all be null/free placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i starts at m_1stNullItemsBeginCount, so
        // "i >= m_1stNullItemsBeginCount" is always true and this check is
        // vacuous — presumably the intent was the opposite comparison;
        // confirm against upstream before changing.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // In double-stack mode the 2nd vector grows DOWN from the end of the
        // block, so iterate it in reverse to stay in address order.
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7779 
    7780 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7781 {
    7782  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7783  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7784 }
    7785 
// Returns the size of the largest contiguous region available for a NEW
// allocation, which in a linear allocator is only at the ends of the used
// ranges (see comment below).
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        whould make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First still-live item (leading nulls are skipped) and last item.
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // 2nd grows down from the end of the block; back() is its lowest item.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
    7849 
    7850 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7851 {
    7852  const VkDeviceSize size = GetSize();
    7853  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7854  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    7855  const size_t suballoc1stCount = suballocations1st.size();
    7856  const size_t suballoc2ndCount = suballocations2nd.size();
    7857 
    7858  outInfo.blockCount = 1;
    7859  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    7860  outInfo.unusedRangeCount = 0;
    7861  outInfo.usedBytes = 0;
    7862  outInfo.allocationSizeMin = UINT64_MAX;
    7863  outInfo.allocationSizeMax = 0;
    7864  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7865  outInfo.unusedRangeSizeMax = 0;
    7866 
    7867  VkDeviceSize lastOffset = 0;
    7868 
    7869  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    7870  {
    7871  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    7872  size_t nextAlloc2ndIndex = 0;
    7873  while(lastOffset < freeSpace2ndTo1stEnd)
    7874  {
    7875  // Find next non-null allocation or move nextAllocIndex to the end.
    7876  while(nextAlloc2ndIndex < suballoc2ndCount &&
    7877  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7878  {
    7879  ++nextAlloc2ndIndex;
    7880  }
    7881 
    7882  // Found non-null allocation.
    7883  if(nextAlloc2ndIndex < suballoc2ndCount)
    7884  {
    7885  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7886 
    7887  // 1. Process free space before this allocation.
    7888  if(lastOffset < suballoc.offset)
    7889  {
    7890  // There is free space from lastOffset to suballoc.offset.
    7891  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7892  ++outInfo.unusedRangeCount;
    7893  outInfo.unusedBytes += unusedRangeSize;
    7894  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7895  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7896  }
    7897 
    7898  // 2. Process this allocation.
    7899  // There is allocation with suballoc.offset, suballoc.size.
    7900  outInfo.usedBytes += suballoc.size;
    7901  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7902  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7903 
    7904  // 3. Prepare for next iteration.
    7905  lastOffset = suballoc.offset + suballoc.size;
    7906  ++nextAlloc2ndIndex;
    7907  }
    7908  // We are at the end.
    7909  else
    7910  {
    7911  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    7912  if(lastOffset < freeSpace2ndTo1stEnd)
    7913  {
    7914  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    7915  ++outInfo.unusedRangeCount;
    7916  outInfo.unusedBytes += unusedRangeSize;
    7917  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7918  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7919  }
    7920 
    7921  // End of loop.
    7922  lastOffset = freeSpace2ndTo1stEnd;
    7923  }
    7924  }
    7925  }
    7926 
    7927  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    7928  const VkDeviceSize freeSpace1stTo2ndEnd =
    7929  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    7930  while(lastOffset < freeSpace1stTo2ndEnd)
    7931  {
    7932  // Find next non-null allocation or move nextAllocIndex to the end.
    7933  while(nextAlloc1stIndex < suballoc1stCount &&
    7934  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    7935  {
    7936  ++nextAlloc1stIndex;
    7937  }
    7938 
    7939  // Found non-null allocation.
    7940  if(nextAlloc1stIndex < suballoc1stCount)
    7941  {
    7942  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    7943 
    7944  // 1. Process free space before this allocation.
    7945  if(lastOffset < suballoc.offset)
    7946  {
    7947  // There is free space from lastOffset to suballoc.offset.
    7948  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    7949  ++outInfo.unusedRangeCount;
    7950  outInfo.unusedBytes += unusedRangeSize;
    7951  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7952  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7953  }
    7954 
    7955  // 2. Process this allocation.
    7956  // There is allocation with suballoc.offset, suballoc.size.
    7957  outInfo.usedBytes += suballoc.size;
    7958  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7959  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    7960 
    7961  // 3. Prepare for next iteration.
    7962  lastOffset = suballoc.offset + suballoc.size;
    7963  ++nextAlloc1stIndex;
    7964  }
    7965  // We are at the end.
    7966  else
    7967  {
    7968  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    7969  if(lastOffset < freeSpace1stTo2ndEnd)
    7970  {
    7971  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    7972  ++outInfo.unusedRangeCount;
    7973  outInfo.unusedBytes += unusedRangeSize;
    7974  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    7975  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    7976  }
    7977 
    7978  // End of loop.
    7979  lastOffset = freeSpace1stTo2ndEnd;
    7980  }
    7981  }
    7982 
    7983  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    7984  {
    7985  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    7986  while(lastOffset < size)
    7987  {
    7988  // Find next non-null allocation or move nextAllocIndex to the end.
    7989  while(nextAlloc2ndIndex != SIZE_MAX &&
    7990  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    7991  {
    7992  --nextAlloc2ndIndex;
    7993  }
    7994 
    7995  // Found non-null allocation.
    7996  if(nextAlloc2ndIndex != SIZE_MAX)
    7997  {
    7998  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    7999 
    8000  // 1. Process free space before this allocation.
    8001  if(lastOffset < suballoc.offset)
    8002  {
    8003  // There is free space from lastOffset to suballoc.offset.
    8004  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8005  ++outInfo.unusedRangeCount;
    8006  outInfo.unusedBytes += unusedRangeSize;
    8007  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8008  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8009  }
    8010 
    8011  // 2. Process this allocation.
    8012  // There is allocation with suballoc.offset, suballoc.size.
    8013  outInfo.usedBytes += suballoc.size;
    8014  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8015  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8016 
    8017  // 3. Prepare for next iteration.
    8018  lastOffset = suballoc.offset + suballoc.size;
    8019  --nextAlloc2ndIndex;
    8020  }
    8021  // We are at the end.
    8022  else
    8023  {
    8024  // There is free space from lastOffset to size.
    8025  if(lastOffset < size)
    8026  {
    8027  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8028  ++outInfo.unusedRangeCount;
    8029  outInfo.unusedBytes += unusedRangeSize;
    8030  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8031  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8032  }
    8033 
    8034  // End of loop.
    8035  lastOffset = size;
    8036  }
    8037  }
    8038  }
    8039 
    8040  outInfo.unusedBytes = size - outInfo.usedBytes;
    8041 }
    8042 
    8043 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8044 {
    8045  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8046  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8047  const VkDeviceSize size = GetSize();
    8048  const size_t suballoc1stCount = suballocations1st.size();
    8049  const size_t suballoc2ndCount = suballocations2nd.size();
    8050 
    8051  inoutStats.size += size;
    8052 
    8053  VkDeviceSize lastOffset = 0;
    8054 
    8055  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8056  {
    8057  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8058  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8059  while(lastOffset < freeSpace2ndTo1stEnd)
    8060  {
    8061  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8062  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8063  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8064  {
    8065  ++nextAlloc2ndIndex;
    8066  }
    8067 
    8068  // Found non-null allocation.
    8069  if(nextAlloc2ndIndex < suballoc2ndCount)
    8070  {
    8071  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8072 
    8073  // 1. Process free space before this allocation.
    8074  if(lastOffset < suballoc.offset)
    8075  {
    8076  // There is free space from lastOffset to suballoc.offset.
    8077  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8078  inoutStats.unusedSize += unusedRangeSize;
    8079  ++inoutStats.unusedRangeCount;
    8080  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8081  }
    8082 
    8083  // 2. Process this allocation.
    8084  // There is allocation with suballoc.offset, suballoc.size.
    8085  ++inoutStats.allocationCount;
    8086 
    8087  // 3. Prepare for next iteration.
    8088  lastOffset = suballoc.offset + suballoc.size;
    8089  ++nextAlloc2ndIndex;
    8090  }
    8091  // We are at the end.
    8092  else
    8093  {
    8094  if(lastOffset < freeSpace2ndTo1stEnd)
    8095  {
    8096  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8097  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8098  inoutStats.unusedSize += unusedRangeSize;
    8099  ++inoutStats.unusedRangeCount;
    8100  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8101  }
    8102 
    8103  // End of loop.
    8104  lastOffset = freeSpace2ndTo1stEnd;
    8105  }
    8106  }
    8107  }
    8108 
    8109  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8110  const VkDeviceSize freeSpace1stTo2ndEnd =
    8111  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8112  while(lastOffset < freeSpace1stTo2ndEnd)
    8113  {
    8114  // Find next non-null allocation or move nextAllocIndex to the end.
    8115  while(nextAlloc1stIndex < suballoc1stCount &&
    8116  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8117  {
    8118  ++nextAlloc1stIndex;
    8119  }
    8120 
    8121  // Found non-null allocation.
    8122  if(nextAlloc1stIndex < suballoc1stCount)
    8123  {
    8124  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8125 
    8126  // 1. Process free space before this allocation.
    8127  if(lastOffset < suballoc.offset)
    8128  {
    8129  // There is free space from lastOffset to suballoc.offset.
    8130  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8131  inoutStats.unusedSize += unusedRangeSize;
    8132  ++inoutStats.unusedRangeCount;
    8133  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8134  }
    8135 
    8136  // 2. Process this allocation.
    8137  // There is allocation with suballoc.offset, suballoc.size.
    8138  ++inoutStats.allocationCount;
    8139 
    8140  // 3. Prepare for next iteration.
    8141  lastOffset = suballoc.offset + suballoc.size;
    8142  ++nextAlloc1stIndex;
    8143  }
    8144  // We are at the end.
    8145  else
    8146  {
    8147  if(lastOffset < freeSpace1stTo2ndEnd)
    8148  {
    8149  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8150  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8151  inoutStats.unusedSize += unusedRangeSize;
    8152  ++inoutStats.unusedRangeCount;
    8153  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8154  }
    8155 
    8156  // End of loop.
    8157  lastOffset = freeSpace1stTo2ndEnd;
    8158  }
    8159  }
    8160 
    8161  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8162  {
    8163  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8164  while(lastOffset < size)
    8165  {
    8166  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8167  while(nextAlloc2ndIndex != SIZE_MAX &&
    8168  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8169  {
    8170  --nextAlloc2ndIndex;
    8171  }
    8172 
    8173  // Found non-null allocation.
    8174  if(nextAlloc2ndIndex != SIZE_MAX)
    8175  {
    8176  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8177 
    8178  // 1. Process free space before this allocation.
    8179  if(lastOffset < suballoc.offset)
    8180  {
    8181  // There is free space from lastOffset to suballoc.offset.
    8182  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8183  inoutStats.unusedSize += unusedRangeSize;
    8184  ++inoutStats.unusedRangeCount;
    8185  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8186  }
    8187 
    8188  // 2. Process this allocation.
    8189  // There is allocation with suballoc.offset, suballoc.size.
    8190  ++inoutStats.allocationCount;
    8191 
    8192  // 3. Prepare for next iteration.
    8193  lastOffset = suballoc.offset + suballoc.size;
    8194  --nextAlloc2ndIndex;
    8195  }
    8196  // We are at the end.
    8197  else
    8198  {
    8199  if(lastOffset < size)
    8200  {
    8201  // There is free space from lastOffset to size.
    8202  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8203  inoutStats.unusedSize += unusedRangeSize;
    8204  ++inoutStats.unusedRangeCount;
    8205  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8206  }
    8207 
    8208  // End of loop.
    8209  lastOffset = size;
    8210  }
    8211  }
    8212  }
    8213 }
    8214 
    8215 #if VMA_STATS_STRING_ENABLED
// Writes this block's detailed metadata to `json`: a summary (unused bytes,
// allocation and unused-range counts) followed by every allocation and unused
// range in increasing address order.
// Depending on m_2ndVectorMode, up to three regions are traversed in order:
//   1. 2nd vector in RING_BUFFER mode (occupies the beginning of the block),
//   2. 1st vector,
//   3. 2nd vector in DOUBLE_STACK mode (occupies the end of the block).
// The layout is walked twice with identical logic: the FIRST PASS only counts
// items, because PrintDetailedMap_Begin needs the totals before any item can
// be emitted; the SECOND PASS emits them.
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS: count allocations and unused ranges, sum used bytes.

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    // End of the region processed so far; everything below it is accounted for.
    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // In ring-buffer mode, 2nd vector occupies the address range
        // [0, offset of first used item in 1st vector).
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    // 1st vector ends where the double stack begins, or at the end of the block.
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): compares against `size` where the second pass uses
            // `freeSpace1stTo2ndEnd`; both are equivalent here because the
            // outer while guarantees lastOffset < freeSpace1stTo2ndEnd <= size.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // As a double stack, 2nd vector holds allocations placed from the end
        // of the block; its back() has the lowest offset, so iterate it in
        // reverse to keep increasing address order.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS: walk the identical sequence again, emitting each item.
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Reuses freeSpace1stTo2ndEnd computed during the first pass.
    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
    8530 #endif // #if VMA_STATS_STRING_ENABLED
    8531 
// Tries to find a place for a new allocation of `allocSize` bytes aligned to
// `allocAlignment` inside this linear block, honoring VMA_DEBUG_MARGIN and
// `bufferImageGranularity` (linear vs. optimal resources must not share a
// "page" of that size).
// - upperAddress == true: allocate from the end of the block (double stack,
//   2nd vector). Incompatible with ring-buffer mode.
// - upperAddress == false: try to append at the end of 1st vector; if that
//   fails, try to wrap around to the beginning (ring buffer, 2nd vector),
//   optionally making existing allocations lost when `canMakeOtherLost`.
// On success fills *pAllocationRequest and returns true; `item` is unused by
// this algorithm. `strategy` is accepted for interface compatibility but not
// used by the linear algorithm.
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            if(allocSize > lastSuballoc.offset)
            {
                // Does not fit below the current top of the upper stack.
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment. Upper stack grows downward, so align down.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Align down to a granularity boundary to leave the conflicting page.
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // resultBaseOffset + allocSize is where the upper stack began, so
            // this is the whole gap between 1st vector and the upper stack.
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // Free space ends where the double stack begins, or at block end.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            // Wrap-around only makes sense when 1st vector is non-empty.
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            // index1st advances past every 1st-vector item that collides with
            // the proposed range; those items must be made lost first.
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            // NOTE(review): the first disjunct uses '<' against `size`, unlike
            // the analogous checks above/below which use '<=' — an allocation
            // exactly filling the remaining space up to `size` is rejected.
            // Confirm whether the strict inequality is intentional.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                // Free size is the span up to the first surviving 1st-vector
                // item (or block end), minus the bytes occupied by items that
                // will be made lost.
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    8904 
    8905 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    8906  uint32_t currentFrameIndex,
    8907  uint32_t frameInUseCount,
    8908  VmaAllocationRequest* pAllocationRequest)
    8909 {
    8910  if(pAllocationRequest->itemsToMakeLostCount == 0)
    8911  {
    8912  return true;
    8913  }
    8914 
    8915  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    8916 
    8917  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8918  size_t index1st = m_1stNullItemsBeginCount;
    8919  size_t madeLostCount = 0;
    8920  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    8921  {
    8922  VMA_ASSERT(index1st < suballocations1st.size());
    8923  VmaSuballocation& suballoc = suballocations1st[index1st];
    8924  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    8925  {
    8926  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8927  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    8928  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8929  {
    8930  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8931  suballoc.hAllocation = VK_NULL_HANDLE;
    8932  m_SumFreeSize += suballoc.size;
    8933  ++m_1stNullItemsMiddleCount;
    8934  ++madeLostCount;
    8935  }
    8936  else
    8937  {
    8938  return false;
    8939  }
    8940  }
    8941  ++index1st;
    8942  }
    8943 
    8944  CleanupAfterFree();
    8945  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    8946 
    8947  return true;
    8948 }
    8949 
    8950 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8951 {
    8952  uint32_t lostAllocationCount = 0;
    8953 
    8954  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8955  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8956  {
    8957  VmaSuballocation& suballoc = suballocations1st[i];
    8958  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8959  suballoc.hAllocation->CanBecomeLost() &&
    8960  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8961  {
    8962  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8963  suballoc.hAllocation = VK_NULL_HANDLE;
    8964  ++m_1stNullItemsMiddleCount;
    8965  m_SumFreeSize += suballoc.size;
    8966  ++lostAllocationCount;
    8967  }
    8968  }
    8969 
    8970  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8971  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    8972  {
    8973  VmaSuballocation& suballoc = suballocations2nd[i];
    8974  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    8975  suballoc.hAllocation->CanBecomeLost() &&
    8976  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8977  {
    8978  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8979  suballoc.hAllocation = VK_NULL_HANDLE;
    8980  ++m_2ndNullItemsCount;
    8981  ++lostAllocationCount;
    8982  }
    8983  }
    8984 
    8985  if(lostAllocationCount)
    8986  {
    8987  CleanupAfterFree();
    8988  }
    8989 
    8990  return lostAllocationCount;
    8991 }
    8992 
    8993 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    8994 {
    8995  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8996  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    8997  {
    8998  const VmaSuballocation& suballoc = suballocations1st[i];
    8999  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9000  {
    9001  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9002  {
    9003  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9004  return VK_ERROR_VALIDATION_FAILED_EXT;
    9005  }
    9006  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9007  {
    9008  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9009  return VK_ERROR_VALIDATION_FAILED_EXT;
    9010  }
    9011  }
    9012  }
    9013 
    9014  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9015  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9016  {
    9017  const VmaSuballocation& suballoc = suballocations2nd[i];
    9018  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9019  {
    9020  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9021  {
    9022  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9023  return VK_ERROR_VALIDATION_FAILED_EXT;
    9024  }
    9025  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9026  {
    9027  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9028  return VK_ERROR_VALIDATION_FAILED_EXT;
    9029  }
    9030  }
    9031  }
    9032 
    9033  return VK_SUCCESS;
    9034 }
    9035 
    9036 void VmaBlockMetadata_Linear::Alloc(
    9037  const VmaAllocationRequest& request,
    9038  VmaSuballocationType type,
    9039  VkDeviceSize allocSize,
    9040  bool upperAddress,
    9041  VmaAllocation hAllocation)
    9042 {
    9043  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    9044 
    9045  if(upperAddress)
    9046  {
    9047  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    9048  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    9049  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9050  suballocations2nd.push_back(newSuballoc);
    9051  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    9052  }
    9053  else
    9054  {
    9055  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9056 
    9057  // First allocation.
    9058  if(suballocations1st.empty())
    9059  {
    9060  suballocations1st.push_back(newSuballoc);
    9061  }
    9062  else
    9063  {
    9064  // New allocation at the end of 1st vector.
    9065  if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
    9066  {
    9067  // Check if it fits before the end of the block.
    9068  VMA_ASSERT(request.offset + allocSize <= GetSize());
    9069  suballocations1st.push_back(newSuballoc);
    9070  }
    9071  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    9072  else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
    9073  {
    9074  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9075 
    9076  switch(m_2ndVectorMode)
    9077  {
    9078  case SECOND_VECTOR_EMPTY:
    9079  // First allocation from second part ring buffer.
    9080  VMA_ASSERT(suballocations2nd.empty());
    9081  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    9082  break;
    9083  case SECOND_VECTOR_RING_BUFFER:
    9084  // 2-part ring buffer is already started.
    9085  VMA_ASSERT(!suballocations2nd.empty());
    9086  break;
    9087  case SECOND_VECTOR_DOUBLE_STACK:
    9088  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    9089  break;
    9090  default:
    9091  VMA_ASSERT(0);
    9092  }
    9093 
    9094  suballocations2nd.push_back(newSuballoc);
    9095  }
    9096  else
    9097  {
    9098  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    9099  }
    9100  }
    9101  }
    9102 
    9103  m_SumFreeSize -= newSuballoc.size;
    9104 }
    9105 
    9106 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9107 {
    9108  FreeAtOffset(allocation->GetOffset());
    9109 }
    9110 
// Frees the suballocation located at the given offset.
// Cheap cases are tried first: the very first live item of the 1st vector,
// the last item of the 2nd vector (ring-buffer tail / upper-stack top), and
// the last item of the 1st vector. Otherwise a binary search locates the
// item in the middle of either vector; such items are only marked free
// (lazy removal) and CleanupAfterFree() compacts/rebalances afterwards.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Lazy free: the slot is kept but marked null; compaction happens later.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring-buffer mode keeps the 2nd vector sorted ascending by offset,
        // double-stack mode descending, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9199 
    9200 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9201 {
    9202  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9203  const size_t suballocCount = AccessSuballocations1st().size();
    9204  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9205 }
    9206 
// Housekeeping performed after every free: trims null items from the ends of
// both vectors, optionally compacts the 1st vector, and when the 1st vector
// runs dry promotes the 2nd (ring-buffer) vector to become the new 1st.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Nothing allocated anymore: reset everything to the pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all live items to the front, dropping null slots.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Re-scan leading nulls in the promoted vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which internal vector plays the role of "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9303 
    9304 
    9306 // class VmaBlockMetadata_Buddy
    9307 
// Constructs an empty buddy-allocator metadata object. The tree is not
// built here; Init() creates the root node. m_FreeCount starts at 1 to
// account for that single free root.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9317 
// Destroys the whole buddy tree recursively.
// NOTE(review): assumes Init() was called so m_Root is non-null — confirm
// DeleteNode handles the never-initialized case if that can occur.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9322 
    9323 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9324 {
    9325  VmaBlockMetadata::Init(size);
    9326 
    9327  m_UsableSize = VmaPrevPow2(size);
    9328  m_SumFreeSize = m_UsableSize;
    9329 
    9330  // Calculate m_LevelCount.
    9331  m_LevelCount = 1;
    9332  while(m_LevelCount < MAX_LEVELS &&
    9333  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9334  {
    9335  ++m_LevelCount;
    9336  }
    9337 
    9338  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9339  rootNode->offset = 0;
    9340  rootNode->type = Node::TYPE_FREE;
    9341  rootNode->parent = VMA_NULL;
    9342  rootNode->buddy = VMA_NULL;
    9343 
    9344  m_Root = rootNode;
    9345  AddToFreeListFront(0, rootNode);
    9346 }
    9347 
// Validates internal consistency: the node tree, the aggregated counters,
// and the doubly-linked free lists at every level. Returns true on success;
// VMA_VALIDATE reports and fails on the first violated condition.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Counters recomputed by the tree walk must match the cached members.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Last list element must be the recorded back pointer.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward and backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9390 
    9391 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9392 {
    9393  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9394  {
    9395  if(m_FreeList[level].front != VMA_NULL)
    9396  {
    9397  return LevelToNodeSize(level);
    9398  }
    9399  }
    9400  return 0;
    9401 }
    9402 
// Fills outInfo with allocation statistics for this block by walking the
// buddy tree, then accounts for the unusable tail beyond the power-of-2
// usable size as one additional unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Mins start at UINT64_MAX so the per-node walk can lower them.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9426 
    9427 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9428 {
    9429  const VkDeviceSize unusableSize = GetUnusableSize();
    9430 
    9431  inoutStats.size += GetSize();
    9432  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9433  inoutStats.allocationCount += m_AllocationCount;
    9434  inoutStats.unusedRangeCount += m_FreeCount;
    9435  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9436 
    9437  if(unusableSize > 0)
    9438  {
    9439  ++inoutStats.unusedRangeCount;
    9440  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9441  }
    9442 }
    9443 
    9444 #if VMA_STATS_STRING_ENABLED
    9445 
// Serializes a detailed JSON map of this block: summary statistics, every
// node of the buddy tree, and the unusable tail (if any) as an unused range.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // Statistics are recomputed from scratch just for the header.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9470 
    9471 #endif // #if VMA_STATS_STRING_ENABLED
    9472 
// Attempts to find a free node that can host an allocation of allocSize
// with allocAlignment. On success fills *pAllocationRequest (the chosen
// level is smuggled through customData) and returns true.
// Lost allocations and upper-address placement are not supported here.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Search from the best-fitting level (targetLevel) up to level 0
    // (larger nodes); the loop counts level down: targetLevel, ..., 0.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            // Buddy node offsets are naturally power-of-2 aligned, so a
            // simple modulo check suffices.
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Alloc() retrieves the chosen level from customData.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9523 
// Returns true only when no allocations need to be made lost, because the
// buddy allocator never produces requests that require losing allocations.
bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    return pAllocationRequest->itemsToMakeLostCount == 0;
}
    9535 
// Always returns 0 (no allocations made lost): the buddy allocator does not
// support lost allocations.
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    return 0;
}
    9544 
// Commits an allocation previously prepared by CreateAllocationRequest.
// Starting from the free node chosen at request time (its level is encoded
// in request.customData), the node is split repeatedly until it reaches the
// level matching allocSize, then converted to an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level chosen by CreateAllocationRequest, passed via customData.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node with the requested offset in that level's list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // leftChild is pushed last so it ends up at the front of the list.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9619 
    9620 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9621 {
    9622  if(node->type == Node::TYPE_SPLIT)
    9623  {
    9624  DeleteNode(node->split.leftChild->buddy);
    9625  DeleteNode(node->split.leftChild);
    9626  }
    9627 
    9628  vma_delete(GetAllocationCallbacks(), node);
    9629 }
    9630 
    9631 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9632 {
    9633  VMA_VALIDATE(level < m_LevelCount);
    9634  VMA_VALIDATE(curr->parent == parent);
    9635  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9636  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9637  switch(curr->type)
    9638  {
    9639  case Node::TYPE_FREE:
    9640  // curr->free.prev, next are validated separately.
    9641  ctx.calculatedSumFreeSize += levelNodeSize;
    9642  ++ctx.calculatedFreeCount;
    9643  break;
    9644  case Node::TYPE_ALLOCATION:
    9645  ++ctx.calculatedAllocationCount;
    9646  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9647  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9648  break;
    9649  case Node::TYPE_SPLIT:
    9650  {
    9651  const uint32_t childrenLevel = level + 1;
    9652  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9653  const Node* const leftChild = curr->split.leftChild;
    9654  VMA_VALIDATE(leftChild != VMA_NULL);
    9655  VMA_VALIDATE(leftChild->offset == curr->offset);
    9656  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9657  {
    9658  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9659  }
    9660  const Node* const rightChild = leftChild->buddy;
    9661  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9662  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9663  {
    9664  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9665  }
    9666  }
    9667  break;
    9668  default:
    9669  return false;
    9670  }
    9671 
    9672  return true;
    9673 }
    9674 
    9675 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9676 {
    9677  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9678  uint32_t level = 0;
    9679  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9680  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9681  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9682  {
    9683  ++level;
    9684  currLevelNodeSize = nextLevelNodeSize;
    9685  nextLevelNodeSize = currLevelNodeSize >> 1;
    9686  }
    9687  return level;
    9688 }
    9689 
// Frees the allocation node at the given offset: walks the tree from the
// root following the offset, marks the node free, then repeatedly merges it
// with its buddy while both halves are free, moving up the tree.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    // NOTE(review): the assert permits alloc == VK_NULL_HANDLE, yet
    // alloc->GetSize() below dereferences it unconditionally — confirm all
    // callers pass a valid handle.
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        // Two free children merged into one free parent.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9740 
    9741 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9742 {
    9743  switch(node->type)
    9744  {
    9745  case Node::TYPE_FREE:
    9746  ++outInfo.unusedRangeCount;
    9747  outInfo.unusedBytes += levelNodeSize;
    9748  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9749  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9750  break;
    9751  case Node::TYPE_ALLOCATION:
    9752  {
    9753  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9754  ++outInfo.allocationCount;
    9755  outInfo.usedBytes += allocSize;
    9756  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9757  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9758 
    9759  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9760  if(unusedRangeSize > 0)
    9761  {
    9762  ++outInfo.unusedRangeCount;
    9763  outInfo.unusedBytes += unusedRangeSize;
    9764  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9765  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9766  }
    9767  }
    9768  break;
    9769  case Node::TYPE_SPLIT:
    9770  {
    9771  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9772  const Node* const leftChild = node->split.leftChild;
    9773  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9774  const Node* const rightChild = leftChild->buddy;
    9775  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9776  }
    9777  break;
    9778  default:
    9779  VMA_ASSERT(0);
    9780  }
    9781 }
    9782 
    9783 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9784 {
    9785  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9786 
    9787  // List is empty.
    9788  Node* const frontNode = m_FreeList[level].front;
    9789  if(frontNode == VMA_NULL)
    9790  {
    9791  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9792  node->free.prev = node->free.next = VMA_NULL;
    9793  m_FreeList[level].front = m_FreeList[level].back = node;
    9794  }
    9795  else
    9796  {
    9797  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9798  node->free.prev = VMA_NULL;
    9799  node->free.next = frontNode;
    9800  frontNode->free.prev = node;
    9801  m_FreeList[level].front = node;
    9802  }
    9803 }
    9804 
    9805 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9806 {
    9807  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9808 
    9809  // It is at the front.
    9810  if(node->free.prev == VMA_NULL)
    9811  {
    9812  VMA_ASSERT(m_FreeList[level].front == node);
    9813  m_FreeList[level].front = node->free.next;
    9814  }
    9815  else
    9816  {
    9817  Node* const prevFreeNode = node->free.prev;
    9818  VMA_ASSERT(prevFreeNode->free.next == node);
    9819  prevFreeNode->free.next = node->free.next;
    9820  }
    9821 
    9822  // It is at the back.
    9823  if(node->free.next == VMA_NULL)
    9824  {
    9825  VMA_ASSERT(m_FreeList[level].back == node);
    9826  m_FreeList[level].back = node->free.prev;
    9827  }
    9828  else
    9829  {
    9830  Node* const nextFreeNode = node->free.next;
    9831  VMA_ASSERT(nextFreeNode->free.prev == node);
    9832  nextFreeNode->free.prev = node->free.prev;
    9833  }
    9834 }
    9835 
    9836 #if VMA_STATS_STRING_ENABLED
// Recursively serializes one node of the buddy tree to JSON: free nodes as
// unused ranges, allocation nodes as allocations (plus trailing slack, if
// any), and split nodes by recursing into both half-sized children.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            // Slack between allocation size and node size is reported as unused.
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    9867 #endif // #if VMA_STATS_STRING_ENABLED
    9868 
    9869 
    9871 // class VmaDeviceMemoryBlock
    9872 
// Constructs the block in an uninitialized state: no metadata, no memory,
// not mapped. Init() must be called before the block is used.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    9882 
    9883 void VmaDeviceMemoryBlock::Init(
    9884  VmaAllocator hAllocator,
    9885  uint32_t newMemoryTypeIndex,
    9886  VkDeviceMemory newMemory,
    9887  VkDeviceSize newSize,
    9888  uint32_t id,
    9889  uint32_t algorithm)
    9890 {
    9891  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    9892 
    9893  m_MemoryTypeIndex = newMemoryTypeIndex;
    9894  m_Id = id;
    9895  m_hMemory = newMemory;
    9896 
    9897  switch(algorithm)
    9898  {
    9900  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    9901  break;
    9903  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    9904  break;
    9905  default:
    9906  VMA_ASSERT(0);
    9907  // Fall-through.
    9908  case 0:
    9909  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    9910  }
    9911  m_pMetadata->Init(newSize);
    9912 }
    9913 
// Releases the block's VkDeviceMemory and metadata. All suballocations must
// have been freed beforehand.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    9927 
// Validates the block: it must hold real device memory of nonzero size, and
// its metadata must pass its own validation.
bool VmaDeviceMemoryBlock::Validate() const
{
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}
    9935 
    9936 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    9937 {
    9938  void* pData = nullptr;
    9939  VkResult res = Map(hAllocator, 1, &pData);
    9940  if(res != VK_SUCCESS)
    9941  {
    9942  return res;
    9943  }
    9944 
    9945  res = m_pMetadata->CheckCorruption(pData);
    9946 
    9947  Unmap(hAllocator, 1);
    9948 
    9949  return res;
    9950 }
    9951 
// Maps the block's memory, reference-counted: only the first mapping calls
// vkMapMemory; later calls just bump the counter and return the cached
// pointer. count references are added; ppData may be null if the caller
// doesn't need the pointer. Thread-safe via m_Mutex (when mutexes enabled).
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: reuse the cached pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the whole memory object once.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
    9990 
// Removes count references from the block's map count; vkUnmapMemory is
// called only when the counter drops to zero. Asserts on unbalanced unmap.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            // Last reference gone: actually unmap the memory.
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
    10013 
    10014 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10015 {
    10016  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10017  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10018 
    10019  void* pData;
    10020  VkResult res = Map(hAllocator, 1, &pData);
    10021  if(res != VK_SUCCESS)
    10022  {
    10023  return res;
    10024  }
    10025 
    10026  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10027  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10028 
    10029  Unmap(hAllocator, 1);
    10030 
    10031  return VK_SUCCESS;
    10032 }
    10033 
    10034 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10035 {
    10036  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10037  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10038 
    10039  void* pData;
    10040  VkResult res = Map(hAllocator, 1, &pData);
    10041  if(res != VK_SUCCESS)
    10042  {
    10043  return res;
    10044  }
    10045 
    10046  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10047  {
    10048  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10049  }
    10050  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10051  {
    10052  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10053  }
    10054 
    10055  Unmap(hAllocator, 1);
    10056 
    10057  return VK_SUCCESS;
    10058 }
    10059 
    10060 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10061  const VmaAllocator hAllocator,
    10062  const VmaAllocation hAllocation,
    10063  VkBuffer hBuffer)
    10064 {
    10065  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10066  hAllocation->GetBlock() == this);
    10067  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10068  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10069  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10070  hAllocator->m_hDevice,
    10071  hBuffer,
    10072  m_hMemory,
    10073  hAllocation->GetOffset());
    10074 }
    10075 
    10076 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10077  const VmaAllocator hAllocator,
    10078  const VmaAllocation hAllocation,
    10079  VkImage hImage)
    10080 {
    10081  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10082  hAllocation->GetBlock() == this);
    10083  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10084  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10085  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10086  hAllocator->m_hDevice,
    10087  hImage,
    10088  m_hMemory,
    10089  hAllocation->GetOffset());
    10090 }
    10091 
    10092 static void InitStatInfo(VmaStatInfo& outInfo)
    10093 {
    10094  memset(&outInfo, 0, sizeof(outInfo));
    10095  outInfo.allocationSizeMin = UINT64_MAX;
    10096  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10097 }
    10098 
    10099 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10100 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10101 {
    10102  inoutInfo.blockCount += srcInfo.blockCount;
    10103  inoutInfo.allocationCount += srcInfo.allocationCount;
    10104  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10105  inoutInfo.usedBytes += srcInfo.usedBytes;
    10106  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10107  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10108  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10109  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10110  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10111 }
    10112 
    10113 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10114 {
    10115  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10116  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10117  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10118  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10119 }
    10120 
// Constructs a custom pool as a thin wrapper around a VmaBlockVector.
// preferredBlockSize is used only when createInfo.blockSize == 0; a non-zero
// createInfo.blockSize also marks the block vector as "explicit block size"
// (disables the 1/8-1/4-1/2 heuristic during allocation).
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // IGNORE_BUFFER_IMAGE_GRANULARITY collapses the granularity to 1 byte.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10139 
// Intentionally empty: m_BlockVector's destructor runs automatically and
// releases the pool's device memory blocks.
VmaPool_T::~VmaPool_T()
{
}
    10143 
    10144 #if VMA_STATS_STRING_ENABLED
    10145 
    10146 #endif // #if VMA_STATS_STRING_ENABLED
    10147 
// Constructs an (initially empty) vector of VkDeviceMemory blocks for one
// memory type. Blocks are created lazily by Allocate()/CreateMinBlocks().
//
// memoryTypeIndex       - Vulkan memory type all blocks are allocated from.
// preferredBlockSize    - target size for new blocks (heuristically halved
//                         for first blocks unless explicitBlockSize).
// min/maxBlockCount     - bounds on the number of blocks kept alive.
// bufferImageGranularity- granularity applied between buffer and image suballocations.
// frameInUseCount       - frames an allocation stays "in use" for lost-allocation logic.
// isCustomPool          - true when owned by a VmaPool_T rather than the allocator's default vectors.
// explicitBlockSize     - true when the block size was fixed by the user (no size heuristic).
// algorithm             - VMA_POOL_CREATE_*_ALGORITHM bit (0 = default/generic).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10175 
    10176 VmaBlockVector::~VmaBlockVector()
    10177 {
    10178  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10179 
    10180  for(size_t i = m_Blocks.size(); i--; )
    10181  {
    10182  m_Blocks[i]->Destroy(m_hAllocator);
    10183  vma_delete(m_hAllocator, m_Blocks[i]);
    10184  }
    10185 }
    10186 
    10187 VkResult VmaBlockVector::CreateMinBlocks()
    10188 {
    10189  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10190  {
    10191  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10192  if(res != VK_SUCCESS)
    10193  {
    10194  return res;
    10195  }
    10196  }
    10197  return VK_SUCCESS;
    10198 }
    10199 
    10200 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10201 {
    10202  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10203 
    10204  const size_t blockCount = m_Blocks.size();
    10205 
    10206  pStats->size = 0;
    10207  pStats->unusedSize = 0;
    10208  pStats->allocationCount = 0;
    10209  pStats->unusedRangeCount = 0;
    10210  pStats->unusedRangeSizeMax = 0;
    10211  pStats->blockCount = blockCount;
    10212 
    10213  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10214  {
    10215  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10216  VMA_ASSERT(pBlock);
    10217  VMA_HEAVY_ASSERT(pBlock->Validate());
    10218  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10219  }
    10220 }
    10221 
    10222 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10223 {
    10224  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10225  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10226  (VMA_DEBUG_MARGIN > 0) &&
    10227  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10228 }
    10229 
// Upper bound on retries in VmaBlockVector::Allocate when allocating with
// canMakeOtherLost: each attempt may be invalidated by other threads touching
// allocations concurrently, so the search is retried up to this many times.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10231 
// Allocates `size` bytes with `alignment` from this block vector:
// 1) tries existing blocks, 2) creates a new block if allowed, 3) optionally
// makes other (lost-able) allocations lost to free up space.
// On success stores the new allocation in *pAllocation and returns VK_SUCCESS.
//
// NOTE(review): this listing has elided lines (inner numbering jumps noted
// inline below); the block is reproduced verbatim and does not parse as shown.
// Confirm the elided fragments against the upstream file before compiling.
VkResult VmaBlockVector::Allocate(
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    // Decode requested behavior from createInfo.flags.
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    // Which in turn is available only when maxBlockCount = 1.
    if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    {
        canMakeOtherLost = false;
    }

    // Upper address can only be used with linear allocator and within single memory block.
    if(isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Validate strategy.
    // NOTE(review): inner numbering jumps 10267->10269 and 10269->10273 -
    // the non-zero strategy case labels appear to have been dropped from this
    // listing; as written every non-zero strategy falls into `default`.
    switch(strategy)
    {
    case 0:
        break;
        break;
    default:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger that maximum block size for this block vector.
    if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    /*
    Under certain condition, this whole section can be skipped for optimization, so
    we move on directly to trying to allocate with canMakeOtherLost. That's the case
    e.g. for custom pools with linear algorithm.
    */
    if(!canMakeOtherLost || canCreateNewBlock)
    {
        // 1. Search existing allocations. Try to allocate without making other allocations lost.
        VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
        // NOTE(review): inner numbering jumps 10294->10296 - a line adjusting
        // allocFlagsCopy (presumably clearing CAN_MAKE_OTHER_LOST for this
        // non-lost pass) appears elided here.

        if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
        {
            // Use only last block.
            if(!m_Blocks.empty())
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(
                    pCurrBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
                    return VK_SUCCESS;
                }
            }
        }
        else
        {
            // NOTE(review): inner numbering jumps 10323->10325 - the strategy
            // check (an `if(strategy == ...)` header pairing with the
            // `else // WORST_FIT, FIRST_FIT` below) appears elided.
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
        }

        // 2. Try to create new block.
        if(canCreateNewBlock)
        {
            // Calculate optimal size for new block.
            VkDeviceSize newBlockSize = m_PreferredBlockSize;
            uint32_t newBlockSizeShift = 0;
            const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

            if(!m_ExplicitBlockSize)
            {
                // Allocate 1/8, 1/4, 1/2 as first blocks.
                const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
                for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    // Shrink only while still bigger than any existing block
                    // and at least twice the requested size.
                    if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                    }
                    else
                    {
                        break;
                    }
                }
            }

            size_t newBlockIndex = 0;
            VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
            // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
            if(!m_ExplicitBlockSize)
            {
                while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize >= size)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                        res = CreateBlock(newBlockSize, &newBlockIndex);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            if(res == VK_SUCCESS)
            {
                VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
                VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

                res = AllocateFromBlock(
                    pBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
                    return VK_SUCCESS;
                }
                else
                {
                    // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
            }
        }
    }

    // 3. Try to allocate from existing blocks with making other allocations lost.
    if(canMakeOtherLost)
    {
        uint32_t tryIndex = 0;
        for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
        {
            VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
            VmaAllocationRequest bestRequest = {};
            VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;

            // 1. Search existing allocations.
            // NOTE(review): inner numbering jumps 10464->10466 - the strategy
            // check before this brace appears elided (mirrors the search above).
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        // Track the cheapest request (cost = what would be lost).
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost)
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            if(bestRequestCost == 0)
                            {
                                break; // Nothing would be lost - can't do better.
                            }
                        }
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        // NOTE(review): inner numbering jumps 10523->10525 -
                        // the third operand of this condition is elided.
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost ||
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            // NOTE(review): inner numbering jumps 10530->10532 -
                            // the tail of this condition is elided.
                            if(bestRequestCost == 0 ||
                            {
                                break;
                            }
                        }
                    }
                }
            }

            if(pBestRequestBlock != VMA_NULL)
            {
                if(mapped)
                {
                    // Persistently-mapped allocation: map the block up front.
                    VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
                    if(res != VK_SUCCESS)
                    {
                        return res;
                    }
                }

                if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
                    currentFrameIndex,
                    m_FrameInUseCount,
                    &bestRequest))
                {
                    // We no longer have an empty Allocation.
                    if(pBestRequestBlock->m_pMetadata->IsEmpty())
                    {
                        m_HasEmptyBlock = false;
                    }
                    // Allocate from this pBlock.
                    *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
                    pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
                    (*pAllocation)->InitBlockAllocation(
                        hCurrentPool,
                        pBestRequestBlock,
                        bestRequest.offset,
                        alignment,
                        size,
                        suballocType,
                        mapped,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
                    VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
                    // NOTE(review): blockIndex is not in scope here (it is local
                    // to the search loops above); this compiles only while
                    // VMA_DEBUG_LOG expands to nothing - should be fixed upstream.
                    VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
                    (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
                    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                    {
                        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
                    }
                    if(IsCorruptionDetectionEnabled())
                    {
                        VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
                        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
                    }
                    return VK_SUCCESS;
                }
                // else: Some allocations must have been touched while we are here. Next try.
            }
            else
            {
                // Could not find place in any of the blocks - break outer loop.
                break;
            }
        }
        /* Maximum number of tries exceeded - a very unlike event when many other
        threads are simultaneously touching allocations making it impossible to make
        lost at the same time as we try to allocate. */
        if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
        {
            return VK_ERROR_TOO_MANY_OBJECTS;
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10606 
    10607 void VmaBlockVector::Free(
    10608  VmaAllocation hAllocation)
    10609 {
    10610  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10611 
    10612  // Scope for lock.
    10613  {
    10614  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10615 
    10616  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10617 
    10618  if(IsCorruptionDetectionEnabled())
    10619  {
    10620  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10621  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10622  }
    10623 
    10624  if(hAllocation->IsPersistentMap())
    10625  {
    10626  pBlock->Unmap(m_hAllocator, 1);
    10627  }
    10628 
    10629  pBlock->m_pMetadata->Free(hAllocation);
    10630  VMA_HEAVY_ASSERT(pBlock->Validate());
    10631 
    10632  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10633 
    10634  // pBlock became empty after this deallocation.
    10635  if(pBlock->m_pMetadata->IsEmpty())
    10636  {
    10637  // Already has empty Allocation. We don't want to have two, so delete this one.
    10638  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10639  {
    10640  pBlockToDelete = pBlock;
    10641  Remove(pBlock);
    10642  }
    10643  // We now have first empty block.
    10644  else
    10645  {
    10646  m_HasEmptyBlock = true;
    10647  }
    10648  }
    10649  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10650  // (This is optional, heuristics.)
    10651  else if(m_HasEmptyBlock)
    10652  {
    10653  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10654  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10655  {
    10656  pBlockToDelete = pLastBlock;
    10657  m_Blocks.pop_back();
    10658  m_HasEmptyBlock = false;
    10659  }
    10660  }
    10661 
    10662  IncrementallySortBlocks();
    10663  }
    10664 
    10665  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10666  // lock, for performance reason.
    10667  if(pBlockToDelete != VMA_NULL)
    10668  {
    10669  VMA_DEBUG_LOG(" Deleted empty allocation");
    10670  pBlockToDelete->Destroy(m_hAllocator);
    10671  vma_delete(m_hAllocator, pBlockToDelete);
    10672  }
    10673 }
    10674 
    10675 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10676 {
    10677  VkDeviceSize result = 0;
    10678  for(size_t i = m_Blocks.size(); i--; )
    10679  {
    10680  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10681  if(result >= m_PreferredBlockSize)
    10682  {
    10683  break;
    10684  }
    10685  }
    10686  return result;
    10687 }
    10688 
    10689 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10690 {
    10691  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10692  {
    10693  if(m_Blocks[blockIndex] == pBlock)
    10694  {
    10695  VmaVectorRemove(m_Blocks, blockIndex);
    10696  return;
    10697  }
    10698  }
    10699  VMA_ASSERT(0);
    10700 }
    10701 
    10702 void VmaBlockVector::IncrementallySortBlocks()
    10703 {
    10704  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10705  {
    10706  // Bubble sort only until first swap.
    10707  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10708  {
    10709  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10710  {
    10711  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10712  return;
    10713  }
    10714  }
    10715  }
    10716 }
    10717 
// Attempts to carve an allocation of `size`/`alignment` out of a single block
// without making any other allocation lost. On success creates and fully
// initializes *pAllocation and returns VK_SUCCESS; otherwise returns
// VK_ERROR_OUT_OF_DEVICE_MEMORY and leaves the block untouched.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // This path never makes other allocations lost - that flag must be handled
    // by the caller (VmaBlockVector::Allocate) before getting here.
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Persistently-mapped allocation: add a map reference before touching
        // metadata, so a map failure leaves the block unchanged.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Create the allocation object and register it in the block metadata.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Fill new memory with a recognizable pattern (debug feature).
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            // Stamp the debug margins around the new allocation.
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10792 
    10793 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10794 {
    10795  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10796  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10797  allocInfo.allocationSize = blockSize;
    10798  VkDeviceMemory mem = VK_NULL_HANDLE;
    10799  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10800  if(res < 0)
    10801  {
    10802  return res;
    10803  }
    10804 
    10805  // New VkDeviceMemory successfully created.
    10806 
    10807  // Create new Allocation for it.
    10808  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10809  pBlock->Init(
    10810  m_hAllocator,
    10811  m_MemoryTypeIndex,
    10812  mem,
    10813  allocInfo.allocationSize,
    10814  m_NextBlockId++,
    10815  m_Algorithm);
    10816 
    10817  m_Blocks.push_back(pBlock);
    10818  if(pNewBlockIndex != VMA_NULL)
    10819  {
    10820  *pNewBlockIndex = m_Blocks.size() - 1;
    10821  }
    10822 
    10823  return VK_SUCCESS;
    10824 }
    10825 
    10826 #if VMA_STATS_STRING_ENABLED
    10827 
// Serializes this block vector into the JSON statistics dump. Custom pools
// emit their full configuration (block size, block-count bounds, algorithm);
// default vectors emit only the preferred block size. Then every block's
// metadata is dumped keyed by block id. Key names and emission order are part
// of the stats-string format - do not reorder.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are emitted only when they constrain anything.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // 0 means the default algorithm - omitted from the dump.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by its numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    10890 
    10891 #endif // #if VMA_STATS_STRING_ENABLED
    10892 
    10893 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    10894  VmaAllocator hAllocator,
    10895  uint32_t currentFrameIndex)
    10896 {
    10897  if(m_pDefragmentator == VMA_NULL)
    10898  {
    10899  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    10900  hAllocator,
    10901  this,
    10902  currentFrameIndex);
    10903  }
    10904 
    10905  return m_pDefragmentator;
    10906 }
    10907 
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // Executes the defragmentation prepared via EnsureDefragmentator() and
    // AddAllocation(). On return, maxBytesToMove / maxAllocationsToMove are
    // decreased by the amounts actually consumed, optional statistics are
    // accumulated into pDefragmentationStats, and device memory blocks that
    // became empty are destroyed (subject to m_MinBlockCount).
    if(m_pDefragmentator == VMA_NULL)
    {
        // Nothing was scheduled for this block vector.
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Report the remaining budget back to the caller through the references.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    m_HasEmptyBlock = false;
    // Iterate backwards so VmaVectorRemove() does not shift indices still to visit.
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Destroy empty blocks above the configured minimum; if the
            // minimum forces one to stay, remember that an empty block exists.
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    10964 
    10965 void VmaBlockVector::DestroyDefragmentator()
    10966 {
    10967  if(m_pDefragmentator != VMA_NULL)
    10968  {
    10969  vma_delete(m_hAllocator, m_pDefragmentator);
    10970  m_pDefragmentator = VMA_NULL;
    10971  }
    10972 }
    10973 
    10974 void VmaBlockVector::MakePoolAllocationsLost(
    10975  uint32_t currentFrameIndex,
    10976  size_t* pLostAllocationCount)
    10977 {
    10978  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10979  size_t lostAllocationCount = 0;
    10980  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10981  {
    10982  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10983  VMA_ASSERT(pBlock);
    10984  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    10985  }
    10986  if(pLostAllocationCount != VMA_NULL)
    10987  {
    10988  *pLostAllocationCount = lostAllocationCount;
    10989  }
    10990 }
    10991 
    10992 VkResult VmaBlockVector::CheckCorruption()
    10993 {
    10994  if(!IsCorruptionDetectionEnabled())
    10995  {
    10996  return VK_ERROR_FEATURE_NOT_PRESENT;
    10997  }
    10998 
    10999  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11000  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11001  {
    11002  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11003  VMA_ASSERT(pBlock);
    11004  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11005  if(res != VK_SUCCESS)
    11006  {
    11007  return res;
    11008  }
    11009  }
    11010  return VK_SUCCESS;
    11011 }
    11012 
    11013 void VmaBlockVector::AddStats(VmaStats* pStats)
    11014 {
    11015  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11016  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11017 
    11018  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11019 
    11020  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11021  {
    11022  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11023  VMA_ASSERT(pBlock);
    11024  VMA_HEAVY_ASSERT(pBlock->Validate());
    11025  VmaStatInfo allocationStatInfo;
    11026  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11027  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11028  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11029  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11030  }
    11031 }
    11032 
    11034 // VmaDefragmentator members definition
    11035 
// Captures the block vector to defragment and the frame index used for
// lost-allocation checks; the allocation/block lists use the allocator's
// custom allocation callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation is only supported for the default metadata algorithm;
    // linear/buddy block vectors must not create a defragmentator.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11050 
    11051 VmaDefragmentator::~VmaDefragmentator()
    11052 {
    11053  for(size_t i = m_Blocks.size(); i--; )
    11054  {
    11055  vma_delete(m_hAllocator, m_Blocks[i]);
    11056  }
    11057 }
    11058 
    11059 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11060 {
    11061  AllocationInfo allocInfo;
    11062  allocInfo.m_hAllocation = hAlloc;
    11063  allocInfo.m_pChanged = pChanged;
    11064  m_Allocations.push_back(allocInfo);
    11065 }
    11066 
    11067 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11068 {
    11069  // It has already been mapped for defragmentation.
    11070  if(m_pMappedDataForDefragmentation)
    11071  {
    11072  *ppMappedData = m_pMappedDataForDefragmentation;
    11073  return VK_SUCCESS;
    11074  }
    11075 
    11076  // It is originally mapped.
    11077  if(m_pBlock->GetMappedData())
    11078  {
    11079  *ppMappedData = m_pBlock->GetMappedData();
    11080  return VK_SUCCESS;
    11081  }
    11082 
    11083  // Map on first usage.
    11084  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11085  *ppMappedData = m_pMappedDataForDefragmentation;
    11086  return res;
    11087 }
    11088 
    11089 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11090 {
    11091  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11092  {
    11093  m_pBlock->Unmap(hAllocator, 1);
    11094  }
    11095 }
    11096 
// Performs one pass of defragmentation: walks candidate allocations from the
// most "source" blocks and tries to repack each into an earlier block/offset.
// Returns VK_SUCCESS when no more moves are possible, VK_INCOMPLETE when the
// maxBytesToMove/maxAllocationsToMove budget was exhausted, or a mapping error.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // srcAllocIndex == SIZE_MAX means "not yet positioned inside this block";
    // the loop below then snaps it to the block's last allocation.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                // NOTE(review): in some versions of this library
                // CreateAllocationRequest() takes an additional allocation-
                // strategy argument here — confirm this call matches the
                // current declaration.
                &dstAllocRequest) &&
                MoveMakesSense(
                    dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                // canMakeOtherLost == false, so the request must not require
                // sacrificing other allocations.
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both blocks must be host-visible-mapped to copy the data.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                if(VMA_DEBUG_MARGIN > 0)
                {
                    // Re-establish the debug margins around the new location.
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit the move in the metadata: allocate at destination,
                // free at source, and repoint the allocation handle.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11240 
// Top-level driver: builds per-block bookkeeping, distributes the registered
// allocations into their blocks, sorts blocks from most "destination" to most
// "source", then runs up to two DefragmentRound() passes within the budget.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value (enables binary search below).
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the
                // blocks collected above.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block ordering criteria for the "destination first" sort.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11308 
    11309 bool VmaDefragmentator::MoveMakesSense(
    11310  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11311  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11312 {
    11313  if(dstBlockIndex < srcBlockIndex)
    11314  {
    11315  return true;
    11316  }
    11317  if(dstBlockIndex > srcBlockIndex)
    11318  {
    11319  return false;
    11320  }
    11321  if(dstOffset < srcOffset)
    11322  {
    11323  return true;
    11324  }
    11325  return false;
    11326 }
    11327 
    11329 // VmaRecorder
    11330 
    11331 #if VMA_RECORDING_ENABLED
    11332 
// Constructs an inactive recorder; Init() must be called before any Record*
// method. INT64_MAX sentinels mark the QPC frequency/start as uninitialized.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11341 
// Opens the recording file and writes its header.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture the QueryPerformanceCounter frequency and a start timestamp so
    // GetBasicParams() can report call times in seconds relative to Init().
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: magic line identifying the file, then the recording
    // format version "major,minor".
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,3");

    return VK_SUCCESS;
}
    11363 
    11364 VmaRecorder::~VmaRecorder()
    11365 {
    11366  if(m_File != VMA_NULL)
    11367  {
    11368  fclose(m_File);
    11369  }
    11370 }
    11371 
    11372 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11373 {
    11374  CallParams callParams;
    11375  GetBasicParams(callParams);
    11376 
    11377  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11378  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11379  Flush();
    11380 }
    11381 
    11382 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11383 {
    11384  CallParams callParams;
    11385  GetBasicParams(callParams);
    11386 
    11387  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11388  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11389  Flush();
    11390 }
    11391 
    11392 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    11393 {
    11394  CallParams callParams;
    11395  GetBasicParams(callParams);
    11396 
    11397  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11398  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    11399  createInfo.memoryTypeIndex,
    11400  createInfo.flags,
    11401  createInfo.blockSize,
    11402  (uint64_t)createInfo.minBlockCount,
    11403  (uint64_t)createInfo.maxBlockCount,
    11404  createInfo.frameInUseCount,
    11405  pool);
    11406  Flush();
    11407 }
    11408 
    11409 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11410 {
    11411  CallParams callParams;
    11412  GetBasicParams(callParams);
    11413 
    11414  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11415  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11416  pool);
    11417  Flush();
    11418 }
    11419 
    11420 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    11421  const VkMemoryRequirements& vkMemReq,
    11422  const VmaAllocationCreateInfo& createInfo,
    11423  VmaAllocation allocation)
    11424 {
    11425  CallParams callParams;
    11426  GetBasicParams(callParams);
    11427 
    11428  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11429  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11430  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11431  vkMemReq.size,
    11432  vkMemReq.alignment,
    11433  vkMemReq.memoryTypeBits,
    11434  createInfo.flags,
    11435  createInfo.usage,
    11436  createInfo.requiredFlags,
    11437  createInfo.preferredFlags,
    11438  createInfo.memoryTypeBits,
    11439  createInfo.pool,
    11440  allocation,
    11441  userDataStr.GetString());
    11442  Flush();
    11443 }
    11444 
    11445 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11446  const VkMemoryRequirements& vkMemReq,
    11447  bool requiresDedicatedAllocation,
    11448  bool prefersDedicatedAllocation,
    11449  const VmaAllocationCreateInfo& createInfo,
    11450  VmaAllocation allocation)
    11451 {
    11452  CallParams callParams;
    11453  GetBasicParams(callParams);
    11454 
    11455  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11456  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11457  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11458  vkMemReq.size,
    11459  vkMemReq.alignment,
    11460  vkMemReq.memoryTypeBits,
    11461  requiresDedicatedAllocation ? 1 : 0,
    11462  prefersDedicatedAllocation ? 1 : 0,
    11463  createInfo.flags,
    11464  createInfo.usage,
    11465  createInfo.requiredFlags,
    11466  createInfo.preferredFlags,
    11467  createInfo.memoryTypeBits,
    11468  createInfo.pool,
    11469  allocation,
    11470  userDataStr.GetString());
    11471  Flush();
    11472 }
    11473 
    11474 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    11475  const VkMemoryRequirements& vkMemReq,
    11476  bool requiresDedicatedAllocation,
    11477  bool prefersDedicatedAllocation,
    11478  const VmaAllocationCreateInfo& createInfo,
    11479  VmaAllocation allocation)
    11480 {
    11481  CallParams callParams;
    11482  GetBasicParams(callParams);
    11483 
    11484  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11485  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11486  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11487  vkMemReq.size,
    11488  vkMemReq.alignment,
    11489  vkMemReq.memoryTypeBits,
    11490  requiresDedicatedAllocation ? 1 : 0,
    11491  prefersDedicatedAllocation ? 1 : 0,
    11492  createInfo.flags,
    11493  createInfo.usage,
    11494  createInfo.requiredFlags,
    11495  createInfo.preferredFlags,
    11496  createInfo.memoryTypeBits,
    11497  createInfo.pool,
    11498  allocation,
    11499  userDataStr.GetString());
    11500  Flush();
    11501 }
    11502 
    11503 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11504  VmaAllocation allocation)
    11505 {
    11506  CallParams callParams;
    11507  GetBasicParams(callParams);
    11508 
    11509  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11510  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11511  allocation);
    11512  Flush();
    11513 }
    11514 
    11515 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11516  VmaAllocation allocation,
    11517  const void* pUserData)
    11518 {
    11519  CallParams callParams;
    11520  GetBasicParams(callParams);
    11521 
    11522  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11523  UserDataString userDataStr(
    11524  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11525  pUserData);
    11526  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11527  allocation,
    11528  userDataStr.GetString());
    11529  Flush();
    11530 }
    11531 
    11532 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11533  VmaAllocation allocation)
    11534 {
    11535  CallParams callParams;
    11536  GetBasicParams(callParams);
    11537 
    11538  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11539  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11540  allocation);
    11541  Flush();
    11542 }
    11543 
    11544 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11545  VmaAllocation allocation)
    11546 {
    11547  CallParams callParams;
    11548  GetBasicParams(callParams);
    11549 
    11550  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11551  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11552  allocation);
    11553  Flush();
    11554 }
    11555 
    11556 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11557  VmaAllocation allocation)
    11558 {
    11559  CallParams callParams;
    11560  GetBasicParams(callParams);
    11561 
    11562  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11563  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11564  allocation);
    11565  Flush();
    11566 }
    11567 
    11568 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11569  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11570 {
    11571  CallParams callParams;
    11572  GetBasicParams(callParams);
    11573 
    11574  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11575  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11576  allocation,
    11577  offset,
    11578  size);
    11579  Flush();
    11580 }
    11581 
    11582 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11583  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11584 {
    11585  CallParams callParams;
    11586  GetBasicParams(callParams);
    11587 
    11588  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11589  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11590  allocation,
    11591  offset,
    11592  size);
    11593  Flush();
    11594 }
    11595 
    11596 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11597  const VkBufferCreateInfo& bufCreateInfo,
    11598  const VmaAllocationCreateInfo& allocCreateInfo,
    11599  VmaAllocation allocation)
    11600 {
    11601  CallParams callParams;
    11602  GetBasicParams(callParams);
    11603 
    11604  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11605  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11606  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11607  bufCreateInfo.flags,
    11608  bufCreateInfo.size,
    11609  bufCreateInfo.usage,
    11610  bufCreateInfo.sharingMode,
    11611  allocCreateInfo.flags,
    11612  allocCreateInfo.usage,
    11613  allocCreateInfo.requiredFlags,
    11614  allocCreateInfo.preferredFlags,
    11615  allocCreateInfo.memoryTypeBits,
    11616  allocCreateInfo.pool,
    11617  allocation,
    11618  userDataStr.GetString());
    11619  Flush();
    11620 }
    11621 
    11622 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    11623  const VkImageCreateInfo& imageCreateInfo,
    11624  const VmaAllocationCreateInfo& allocCreateInfo,
    11625  VmaAllocation allocation)
    11626 {
    11627  CallParams callParams;
    11628  GetBasicParams(callParams);
    11629 
    11630  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11631  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11632  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11633  imageCreateInfo.flags,
    11634  imageCreateInfo.imageType,
    11635  imageCreateInfo.format,
    11636  imageCreateInfo.extent.width,
    11637  imageCreateInfo.extent.height,
    11638  imageCreateInfo.extent.depth,
    11639  imageCreateInfo.mipLevels,
    11640  imageCreateInfo.arrayLayers,
    11641  imageCreateInfo.samples,
    11642  imageCreateInfo.tiling,
    11643  imageCreateInfo.usage,
    11644  imageCreateInfo.sharingMode,
    11645  imageCreateInfo.initialLayout,
    11646  allocCreateInfo.flags,
    11647  allocCreateInfo.usage,
    11648  allocCreateInfo.requiredFlags,
    11649  allocCreateInfo.preferredFlags,
    11650  allocCreateInfo.memoryTypeBits,
    11651  allocCreateInfo.pool,
    11652  allocation,
    11653  userDataStr.GetString());
    11654  Flush();
    11655 }
    11656 
    11657 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11658  VmaAllocation allocation)
    11659 {
    11660  CallParams callParams;
    11661  GetBasicParams(callParams);
    11662 
    11663  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11664  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11665  allocation);
    11666  Flush();
    11667 }
    11668 
    11669 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11670  VmaAllocation allocation)
    11671 {
    11672  CallParams callParams;
    11673  GetBasicParams(callParams);
    11674 
    11675  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11676  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11677  allocation);
    11678  Flush();
    11679 }
    11680 
    11681 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11682  VmaAllocation allocation)
    11683 {
    11684  CallParams callParams;
    11685  GetBasicParams(callParams);
    11686 
    11687  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11688  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11689  allocation);
    11690  Flush();
    11691 }
    11692 
    11693 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11694  VmaAllocation allocation)
    11695 {
    11696  CallParams callParams;
    11697  GetBasicParams(callParams);
    11698 
    11699  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11700  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11701  allocation);
    11702  Flush();
    11703 }
    11704 
    11705 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11706  VmaPool pool)
    11707 {
    11708  CallParams callParams;
    11709  GetBasicParams(callParams);
    11710 
    11711  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11712  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11713  pool);
    11714  Flush();
    11715 }
    11716 
    11717 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11718 {
    11719  if(pUserData != VMA_NULL)
    11720  {
    11721  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11722  {
    11723  m_Str = (const char*)pUserData;
    11724  }
    11725  else
    11726  {
    11727  sprintf_s(m_PtrStr, "%p", pUserData);
    11728  m_Str = m_PtrStr;
    11729  }
    11730  }
    11731  else
    11732  {
    11733  m_Str = "";
    11734  }
    11735 }
    11736 
// Writes the "Config,Begin" ... "Config,End" section of the recording file:
// physical-device identity, relevant limits, the full memory heap/type
// topology, enabled extensions, and the VMA_* debug macro values the library
// was compiled with. Replaying tools use this to reproduce the environment.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that affect allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps and types.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of the library itself.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11782 
    11783 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11784 {
    11785  outParams.threadId = GetCurrentThreadId();
    11786 
    11787  LARGE_INTEGER counter;
    11788  QueryPerformanceCounter(&counter);
    11789  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11790 }
    11791 
    11792 void VmaRecorder::Flush()
    11793 {
    11794  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11795  {
    11796  fflush(m_File);
    11797  }
    11798 }
    11799 
    11800 #endif // #if VMA_RECORDING_ENABLED
    11801 
    11803 // VmaAllocator_T
    11804 
    11805 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    11806  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    11807  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    11808  m_hDevice(pCreateInfo->device),
    11809  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    11810  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    11811  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    11812  m_PreferredLargeHeapBlockSize(0),
    11813  m_PhysicalDevice(pCreateInfo->physicalDevice),
    11814  m_CurrentFrameIndex(0),
    11815  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    11816  m_NextPoolId(0)
    11818  ,m_pRecorder(VMA_NULL)
    11819 #endif
    11820 {
    11821  if(VMA_DEBUG_DETECT_CORRUPTION)
    11822  {
    11823  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    11824  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    11825  }
    11826 
    11827  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    11828 
    11829 #if !(VMA_DEDICATED_ALLOCATION)
    11831  {
    11832  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    11833  }
    11834 #endif
    11835 
    11836  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    11837  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    11838  memset(&m_MemProps, 0, sizeof(m_MemProps));
    11839 
    11840  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    11841  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    11842 
    11843  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    11844  {
    11845  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    11846  }
    11847 
    11848  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    11849  {
    11850  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    11851  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    11852  }
    11853 
    11854  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    11855 
    11856  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    11857  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    11858 
    11859  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    11860  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    11861  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    11862  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    11863 
    11864  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    11865  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11866 
    11867  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    11868  {
    11869  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    11870  {
    11871  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    11872  if(limit != VK_WHOLE_SIZE)
    11873  {
    11874  m_HeapSizeLimit[heapIndex] = limit;
    11875  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    11876  {
    11877  m_MemProps.memoryHeaps[heapIndex].size = limit;
    11878  }
    11879  }
    11880  }
    11881  }
    11882 
    11883  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    11884  {
    11885  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    11886 
    11887  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    11888  this,
    11889  memTypeIndex,
    11890  preferredBlockSize,
    11891  0,
    11892  SIZE_MAX,
    11893  GetBufferImageGranularity(),
    11894  pCreateInfo->frameInUseCount,
    11895  false, // isCustomPool
    11896  false, // explicitBlockSize
    11897  false); // linearAlgorithm
    11898  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    11899  // becase minBlockCount is 0.
    11900  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    11901 
    11902  }
    11903 }
    11904 
    11905 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    11906 {
    11907  VkResult res = VK_SUCCESS;
    11908 
    11909  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    11910  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    11911  {
    11912 #if VMA_RECORDING_ENABLED
    11913  m_pRecorder = vma_new(this, VmaRecorder)();
    11914  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    11915  if(res != VK_SUCCESS)
    11916  {
    11917  return res;
    11918  }
    11919  m_pRecorder->WriteConfiguration(
    11920  m_PhysicalDeviceProperties,
    11921  m_MemProps,
    11922  m_UseKhrDedicatedAllocation);
    11923  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    11924 #else
    11925  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    11926  return VK_ERROR_FEATURE_NOT_PRESENT;
    11927 #endif
    11928  }
    11929 
    11930  return res;
    11931 }
    11932 
    11933 VmaAllocator_T::~VmaAllocator_T()
    11934 {
    11935 #if VMA_RECORDING_ENABLED
    11936  if(m_pRecorder != VMA_NULL)
    11937  {
    11938  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    11939  vma_delete(this, m_pRecorder);
    11940  }
    11941 #endif
    11942 
    11943  VMA_ASSERT(m_Pools.empty());
    11944 
    11945  for(size_t i = GetMemoryTypeCount(); i--; )
    11946  {
    11947  vma_delete(this, m_pDedicatedAllocations[i]);
    11948  vma_delete(this, m_pBlockVectors[i]);
    11949  }
    11950 }
    11951 
    11952 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    11953 {
    11954 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    11955  m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    11956  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    11957  m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    11958  m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    11959  m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    11960  m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    11961  m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    11962  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    11963  m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    11964  m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    11965  m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    11966  m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    11967  m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    11968  m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    11969  m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    11970  m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    11971 #if VMA_DEDICATED_ALLOCATION
    11972  if(m_UseKhrDedicatedAllocation)
    11973  {
    11974  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    11975  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    11976  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    11977  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    11978  }
    11979 #endif // #if VMA_DEDICATED_ALLOCATION
    11980 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    11981 
    11982 #define VMA_COPY_IF_NOT_NULL(funcName) \
    11983  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    11984 
    11985  if(pVulkanFunctions != VMA_NULL)
    11986  {
    11987  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    11988  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    11989  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    11990  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    11991  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    11992  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    11993  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    11994  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    11995  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    11996  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    11997  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    11998  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    11999  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    12000  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    12001  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    12002  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    12003 #if VMA_DEDICATED_ALLOCATION
    12004  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    12005  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    12006 #endif
    12007  }
    12008 
    12009 #undef VMA_COPY_IF_NOT_NULL
    12010 
    12011  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    12012  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    12013  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    12014  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    12015  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    12016  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    12017  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    12018  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    12019  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    12020  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    12021  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    12022  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    12023  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    12024  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    12025  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    12026  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    12027  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    12028  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    12029 #if VMA_DEDICATED_ALLOCATION
    12030  if(m_UseKhrDedicatedAllocation)
    12031  {
    12032  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    12033  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    12034  }
    12035 #endif
    12036 }
    12037 
    12038 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12039 {
    12040  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12041  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12042  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12043  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12044 }
    12045 
    12046 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12047  VkDeviceSize size,
    12048  VkDeviceSize alignment,
    12049  bool dedicatedAllocation,
    12050  VkBuffer dedicatedBuffer,
    12051  VkImage dedicatedImage,
    12052  const VmaAllocationCreateInfo& createInfo,
    12053  uint32_t memTypeIndex,
    12054  VmaSuballocationType suballocType,
    12055  VmaAllocation* pAllocation)
    12056 {
    12057  VMA_ASSERT(pAllocation != VMA_NULL);
    12058  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12059 
    12060  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12061 
    12062  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12063  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12064  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12065  {
    12066  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12067  }
    12068 
    12069  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12070  VMA_ASSERT(blockVector);
    12071 
    12072  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12073  bool preferDedicatedMemory =
    12074  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12075  dedicatedAllocation ||
    12076  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12077  size > preferredBlockSize / 2;
    12078 
    12079  if(preferDedicatedMemory &&
    12080  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12081  finalCreateInfo.pool == VK_NULL_HANDLE)
    12082  {
    12084  }
    12085 
    12086  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12087  {
    12088  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12089  {
    12090  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12091  }
    12092  else
    12093  {
    12094  return AllocateDedicatedMemory(
    12095  size,
    12096  suballocType,
    12097  memTypeIndex,
    12098  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12099  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12100  finalCreateInfo.pUserData,
    12101  dedicatedBuffer,
    12102  dedicatedImage,
    12103  pAllocation);
    12104  }
    12105  }
    12106  else
    12107  {
    12108  VkResult res = blockVector->Allocate(
    12109  VK_NULL_HANDLE, // hCurrentPool
    12110  m_CurrentFrameIndex.load(),
    12111  size,
    12112  alignment,
    12113  finalCreateInfo,
    12114  suballocType,
    12115  pAllocation);
    12116  if(res == VK_SUCCESS)
    12117  {
    12118  return res;
    12119  }
    12120 
    12121  // 5. Try dedicated memory.
    12122  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12123  {
    12124  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12125  }
    12126  else
    12127  {
    12128  res = AllocateDedicatedMemory(
    12129  size,
    12130  suballocType,
    12131  memTypeIndex,
    12132  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12133  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12134  finalCreateInfo.pUserData,
    12135  dedicatedBuffer,
    12136  dedicatedImage,
    12137  pAllocation);
    12138  if(res == VK_SUCCESS)
    12139  {
    12140  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12141  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12142  return VK_SUCCESS;
    12143  }
    12144  else
    12145  {
    12146  // Everything failed: Return error code.
    12147  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12148  return res;
    12149  }
    12150  }
    12151  }
    12152 }
    12153 
    12154 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    12155  VkDeviceSize size,
    12156  VmaSuballocationType suballocType,
    12157  uint32_t memTypeIndex,
    12158  bool map,
    12159  bool isUserDataString,
    12160  void* pUserData,
    12161  VkBuffer dedicatedBuffer,
    12162  VkImage dedicatedImage,
    12163  VmaAllocation* pAllocation)
    12164 {
    12165  VMA_ASSERT(pAllocation);
    12166 
    12167  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12168  allocInfo.memoryTypeIndex = memTypeIndex;
    12169  allocInfo.allocationSize = size;
    12170 
    12171 #if VMA_DEDICATED_ALLOCATION
    12172  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    12173  if(m_UseKhrDedicatedAllocation)
    12174  {
    12175  if(dedicatedBuffer != VK_NULL_HANDLE)
    12176  {
    12177  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    12178  dedicatedAllocInfo.buffer = dedicatedBuffer;
    12179  allocInfo.pNext = &dedicatedAllocInfo;
    12180  }
    12181  else if(dedicatedImage != VK_NULL_HANDLE)
    12182  {
    12183  dedicatedAllocInfo.image = dedicatedImage;
    12184  allocInfo.pNext = &dedicatedAllocInfo;
    12185  }
    12186  }
    12187 #endif // #if VMA_DEDICATED_ALLOCATION
    12188 
    12189  // Allocate VkDeviceMemory.
    12190  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    12191  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    12192  if(res < 0)
    12193  {
    12194  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12195  return res;
    12196  }
    12197 
    12198  void* pMappedData = VMA_NULL;
    12199  if(map)
    12200  {
    12201  res = (*m_VulkanFunctions.vkMapMemory)(
    12202  m_hDevice,
    12203  hMemory,
    12204  0,
    12205  VK_WHOLE_SIZE,
    12206  0,
    12207  &pMappedData);
    12208  if(res < 0)
    12209  {
    12210  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    12211  FreeVulkanMemory(memTypeIndex, size, hMemory);
    12212  return res;
    12213  }
    12214  }
    12215 
    12216  *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    12217  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    12218  (*pAllocation)->SetUserData(this, pUserData);
    12219  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12220  {
    12221  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    12222  }
    12223 
    12224  // Register it in m_pDedicatedAllocations.
    12225  {
    12226  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12227  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    12228  VMA_ASSERT(pDedicatedAllocations);
    12229  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    12230  }
    12231 
    12232  VMA_DEBUG_LOG(" Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);
    12233 
    12234  return VK_SUCCESS;
    12235 }
    12236 
    12237 void VmaAllocator_T::GetBufferMemoryRequirements(
    12238  VkBuffer hBuffer,
    12239  VkMemoryRequirements& memReq,
    12240  bool& requiresDedicatedAllocation,
    12241  bool& prefersDedicatedAllocation) const
    12242 {
    12243 #if VMA_DEDICATED_ALLOCATION
    12244  if(m_UseKhrDedicatedAllocation)
    12245  {
    12246  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12247  memReqInfo.buffer = hBuffer;
    12248 
    12249  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12250 
    12251  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12252  memReq2.pNext = &memDedicatedReq;
    12253 
    12254  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12255 
    12256  memReq = memReq2.memoryRequirements;
    12257  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12258  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12259  }
    12260  else
    12261 #endif // #if VMA_DEDICATED_ALLOCATION
    12262  {
    12263  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12264  requiresDedicatedAllocation = false;
    12265  prefersDedicatedAllocation = false;
    12266  }
    12267 }
    12268 
    12269 void VmaAllocator_T::GetImageMemoryRequirements(
    12270  VkImage hImage,
    12271  VkMemoryRequirements& memReq,
    12272  bool& requiresDedicatedAllocation,
    12273  bool& prefersDedicatedAllocation) const
    12274 {
    12275 #if VMA_DEDICATED_ALLOCATION
    12276  if(m_UseKhrDedicatedAllocation)
    12277  {
    12278  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12279  memReqInfo.image = hImage;
    12280 
    12281  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12282 
    12283  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12284  memReq2.pNext = &memDedicatedReq;
    12285 
    12286  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12287 
    12288  memReq = memReq2.memoryRequirements;
    12289  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12290  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12291  }
    12292  else
    12293 #endif // #if VMA_DEDICATED_ALLOCATION
    12294  {
    12295  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12296  requiresDedicatedAllocation = false;
    12297  prefersDedicatedAllocation = false;
    12298  }
    12299 }
    12300 
    12301 VkResult VmaAllocator_T::AllocateMemory(
    12302  const VkMemoryRequirements& vkMemReq,
    12303  bool requiresDedicatedAllocation,
    12304  bool prefersDedicatedAllocation,
    12305  VkBuffer dedicatedBuffer,
    12306  VkImage dedicatedImage,
    12307  const VmaAllocationCreateInfo& createInfo,
    12308  VmaSuballocationType suballocType,
    12309  VmaAllocation* pAllocation)
    12310 {
    12311  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12312 
    12313  if(vkMemReq.size == 0)
    12314  {
    12315  return VK_ERROR_VALIDATION_FAILED_EXT;
    12316  }
    12317  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12318  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12319  {
    12320  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12321  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12322  }
    12323  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12325  {
    12326  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12327  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12328  }
    12329  if(requiresDedicatedAllocation)
    12330  {
    12331  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12332  {
    12333  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12334  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12335  }
    12336  if(createInfo.pool != VK_NULL_HANDLE)
    12337  {
    12338  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12339  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12340  }
    12341  }
    12342  if((createInfo.pool != VK_NULL_HANDLE) &&
    12343  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12344  {
    12345  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12346  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12347  }
    12348 
    12349  if(createInfo.pool != VK_NULL_HANDLE)
    12350  {
    12351  const VkDeviceSize alignmentForPool = VMA_MAX(
    12352  vkMemReq.alignment,
    12353  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12354  return createInfo.pool->m_BlockVector.Allocate(
    12355  createInfo.pool,
    12356  m_CurrentFrameIndex.load(),
    12357  vkMemReq.size,
    12358  alignmentForPool,
    12359  createInfo,
    12360  suballocType,
    12361  pAllocation);
    12362  }
    12363  else
    12364  {
    12365  // Bit mask of memory Vulkan types acceptable for this allocation.
    12366  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12367  uint32_t memTypeIndex = UINT32_MAX;
    12368  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12369  if(res == VK_SUCCESS)
    12370  {
    12371  VkDeviceSize alignmentForMemType = VMA_MAX(
    12372  vkMemReq.alignment,
    12373  GetMemoryTypeMinAlignment(memTypeIndex));
    12374 
    12375  res = AllocateMemoryOfType(
    12376  vkMemReq.size,
    12377  alignmentForMemType,
    12378  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12379  dedicatedBuffer,
    12380  dedicatedImage,
    12381  createInfo,
    12382  memTypeIndex,
    12383  suballocType,
    12384  pAllocation);
    12385  // Succeeded on first try.
    12386  if(res == VK_SUCCESS)
    12387  {
    12388  return res;
    12389  }
    12390  // Allocation from this memory type failed. Try other compatible memory types.
    12391  else
    12392  {
    12393  for(;;)
    12394  {
    12395  // Remove old memTypeIndex from list of possibilities.
    12396  memoryTypeBits &= ~(1u << memTypeIndex);
    12397  // Find alternative memTypeIndex.
    12398  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12399  if(res == VK_SUCCESS)
    12400  {
    12401  alignmentForMemType = VMA_MAX(
    12402  vkMemReq.alignment,
    12403  GetMemoryTypeMinAlignment(memTypeIndex));
    12404 
    12405  res = AllocateMemoryOfType(
    12406  vkMemReq.size,
    12407  alignmentForMemType,
    12408  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12409  dedicatedBuffer,
    12410  dedicatedImage,
    12411  createInfo,
    12412  memTypeIndex,
    12413  suballocType,
    12414  pAllocation);
    12415  // Allocation from this alternative memory type succeeded.
    12416  if(res == VK_SUCCESS)
    12417  {
    12418  return res;
    12419  }
    12420  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12421  }
    12422  // No other matching memory type index could be found.
    12423  else
    12424  {
    12425  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12426  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12427  }
    12428  }
    12429  }
    12430  }
    12431  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12432  else
    12433  return res;
    12434  }
    12435 }
    12436 
    12437 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12438 {
    12439  VMA_ASSERT(allocation);
    12440 
    12441  if(TouchAllocation(allocation))
    12442  {
    12443  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12444  {
    12445  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12446  }
    12447 
    12448  switch(allocation->GetType())
    12449  {
    12450  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12451  {
    12452  VmaBlockVector* pBlockVector = VMA_NULL;
    12453  VmaPool hPool = allocation->GetPool();
    12454  if(hPool != VK_NULL_HANDLE)
    12455  {
    12456  pBlockVector = &hPool->m_BlockVector;
    12457  }
    12458  else
    12459  {
    12460  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12461  pBlockVector = m_pBlockVectors[memTypeIndex];
    12462  }
    12463  pBlockVector->Free(allocation);
    12464  }
    12465  break;
    12466  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12467  FreeDedicatedMemory(allocation);
    12468  break;
    12469  default:
    12470  VMA_ASSERT(0);
    12471  }
    12472  }
    12473 
    12474  allocation->SetUserData(this, VMA_NULL);
    12475  vma_delete(this, allocation);
    12476 }
    12477 
    12478 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12479 {
    12480  // Initialize.
    12481  InitStatInfo(pStats->total);
    12482  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12483  InitStatInfo(pStats->memoryType[i]);
    12484  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12485  InitStatInfo(pStats->memoryHeap[i]);
    12486 
    12487  // Process default pools.
    12488  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12489  {
    12490  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12491  VMA_ASSERT(pBlockVector);
    12492  pBlockVector->AddStats(pStats);
    12493  }
    12494 
    12495  // Process custom pools.
    12496  {
    12497  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12498  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12499  {
    12500  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12501  }
    12502  }
    12503 
    12504  // Process dedicated allocations.
    12505  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12506  {
    12507  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12508  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12509  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12510  VMA_ASSERT(pDedicatedAllocVector);
    12511  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12512  {
    12513  VmaStatInfo allocationStatInfo;
    12514  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12515  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12516  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12517  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12518  }
    12519  }
    12520 
    12521  // Postprocess.
    12522  VmaPostprocessCalcStatInfo(pStats->total);
    12523  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12524  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12525  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12526  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12527 }
    12528 
// PCI vendor ID of AMD (4098 == 0x1002), used to detect AMD GPUs.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12530 
    12531 VkResult VmaAllocator_T::Defragment(
    12532  VmaAllocation* pAllocations,
    12533  size_t allocationCount,
    12534  VkBool32* pAllocationsChanged,
    12535  const VmaDefragmentationInfo* pDefragmentationInfo,
    12536  VmaDefragmentationStats* pDefragmentationStats)
    12537 {
    12538  if(pAllocationsChanged != VMA_NULL)
    12539  {
    12540  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    12541  }
    12542  if(pDefragmentationStats != VMA_NULL)
    12543  {
    12544  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12545  }
    12546 
    12547  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12548 
    12549  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12550 
    12551  const size_t poolCount = m_Pools.size();
    12552 
    12553  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12554  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12555  {
    12556  VmaAllocation hAlloc = pAllocations[allocIndex];
    12557  VMA_ASSERT(hAlloc);
    12558  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12559  // DedicatedAlloc cannot be defragmented.
    12560  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12561  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12562  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12563  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12564  // Lost allocation cannot be defragmented.
    12565  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12566  {
    12567  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12568 
    12569  const VmaPool hAllocPool = hAlloc->GetPool();
    12570  // This allocation belongs to custom pool.
    12571  if(hAllocPool != VK_NULL_HANDLE)
    12572  {
    12573  // Pools with linear or buddy algorithm are not defragmented.
    12574  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12575  {
    12576  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12577  }
    12578  }
    12579  // This allocation belongs to general pool.
    12580  else
    12581  {
    12582  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12583  }
    12584 
    12585  if(pAllocBlockVector != VMA_NULL)
    12586  {
    12587  VmaDefragmentator* const pDefragmentator =
    12588  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12589  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12590  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12591  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12592  }
    12593  }
    12594  }
    12595 
    12596  VkResult result = VK_SUCCESS;
    12597 
    12598  // ======== Main processing.
    12599 
    12600  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12601  uint32_t maxAllocationsToMove = UINT32_MAX;
    12602  if(pDefragmentationInfo != VMA_NULL)
    12603  {
    12604  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12605  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12606  }
    12607 
    12608  // Process standard memory.
    12609  for(uint32_t memTypeIndex = 0;
    12610  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12611  ++memTypeIndex)
    12612  {
    12613  // Only HOST_VISIBLE memory types can be defragmented.
    12614  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12615  {
    12616  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12617  pDefragmentationStats,
    12618  maxBytesToMove,
    12619  maxAllocationsToMove);
    12620  }
    12621  }
    12622 
    12623  // Process custom pools.
    12624  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12625  {
    12626  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12627  pDefragmentationStats,
    12628  maxBytesToMove,
    12629  maxAllocationsToMove);
    12630  }
    12631 
    12632  // ======== Destroy defragmentators.
    12633 
    12634  // Process custom pools.
    12635  for(size_t poolIndex = poolCount; poolIndex--; )
    12636  {
    12637  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12638  }
    12639 
    12640  // Process standard memory.
    12641  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12642  {
    12643  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12644  {
    12645  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12646  }
    12647  }
    12648 
    12649  return result;
    12650 }
    12651 
// Fills *pAllocationInfo with current parameters of the allocation.
// For allocations that can become lost, this call also acts as a "touch":
// it atomically advances the allocation's last-use frame index to the current
// frame, or reports the allocation as lost (memoryType == UINT32_MAX,
// deviceMemory == VK_NULL_HANDLE) if it has already been lost.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Lock-free compare-exchange loop: retry until the allocation is either
        // confirmed lost or its last-use index equals the current frame.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report sentinel values; size and
                // pUserData remain meaningful.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report live parameters.
                // pMappedData is VMA_NULL because lost-capable allocations
                // cannot be mapped.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump the last-use index to the current frame; on CAS
                // failure localLastUseFrameIndex is reloaded and we loop again.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // When statistics are enabled, still record the touch so usage shows up
        // in the stats, even though this allocation can never be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12723 
// Atomically marks the allocation as used in the current frame, so that it is
// not considered for becoming lost. Returns false if the allocation is already
// lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Lock-free compare-exchange loop; see GetAllocationInfo for details.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // On CAS failure localLastUseFrameIndex is reloaded; loop again.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Record the touch for statistics even though this allocation can
        // never become lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    12775 
    12776 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    12777 {
    12778  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    12779 
    12780  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    12781 
    12782  if(newCreateInfo.maxBlockCount == 0)
    12783  {
    12784  newCreateInfo.maxBlockCount = SIZE_MAX;
    12785  }
    12786  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    12787  {
    12788  return VK_ERROR_INITIALIZATION_FAILED;
    12789  }
    12790 
    12791  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    12792 
    12793  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    12794 
    12795  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    12796  if(res != VK_SUCCESS)
    12797  {
    12798  vma_delete(this, *pPool);
    12799  *pPool = VMA_NULL;
    12800  return res;
    12801  }
    12802 
    12803  // Add to m_Pools.
    12804  {
    12805  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12806  (*pPool)->SetId(m_NextPoolId++);
    12807  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    12808  }
    12809 
    12810  return VK_SUCCESS;
    12811 }
    12812 
    12813 void VmaAllocator_T::DestroyPool(VmaPool pool)
    12814 {
    12815  // Remove from m_Pools.
    12816  {
    12817  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12818  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    12819  VMA_ASSERT(success && "Pool not found in Allocator.");
    12820  }
    12821 
    12822  vma_delete(this, pool);
    12823 }
    12824 
// Retrieves statistics of the given custom pool into *pPoolStats.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    12829 
// Atomically publishes the new current frame index, which drives the
// lost-allocation tracking in GetAllocationInfo/TouchAllocation.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    12834 
// Marks as lost all eligible allocations in hPool, based on the current frame
// index. If pLostAllocationCount is not null, receives the number of
// allocations that were marked lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    12843 
// Validates corruption-detection margins of all allocations in the pool.
// Forwards to the pool's block vector; may return
// VK_ERROR_FEATURE_NOT_PRESENT if corruption detection is unavailable there.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    12848 
    12849 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    12850 {
    12851  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    12852 
    12853  // Process default pools.
    12854  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12855  {
    12856  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    12857  {
    12858  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12859  VMA_ASSERT(pBlockVector);
    12860  VkResult localRes = pBlockVector->CheckCorruption();
    12861  switch(localRes)
    12862  {
    12863  case VK_ERROR_FEATURE_NOT_PRESENT:
    12864  break;
    12865  case VK_SUCCESS:
    12866  finalRes = VK_SUCCESS;
    12867  break;
    12868  default:
    12869  return localRes;
    12870  }
    12871  }
    12872  }
    12873 
    12874  // Process custom pools.
    12875  {
    12876  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12877  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12878  {
    12879  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    12880  {
    12881  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    12882  switch(localRes)
    12883  {
    12884  case VK_ERROR_FEATURE_NOT_PRESENT:
    12885  break;
    12886  case VK_SUCCESS:
    12887  finalRes = VK_SUCCESS;
    12888  break;
    12889  default:
    12890  return localRes;
    12891  }
    12892  }
    12893  }
    12894  }
    12895 
    12896  return finalRes;
    12897 }
    12898 
// Creates a dummy allocation that is already in the "lost" state. It never
// owns real memory and can be used as a placeholder handle.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    12904 
    12905 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    12906 {
    12907  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    12908 
    12909  VkResult res;
    12910  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    12911  {
    12912  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    12913  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    12914  {
    12915  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12916  if(res == VK_SUCCESS)
    12917  {
    12918  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    12919  }
    12920  }
    12921  else
    12922  {
    12923  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12924  }
    12925  }
    12926  else
    12927  {
    12928  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    12929  }
    12930 
    12931  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    12932  {
    12933  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    12934  }
    12935 
    12936  return res;
    12937 }
    12938 
// Frees device memory previously obtained from AllocateVulkanMemory.
// Order matters: the user's pfnFree callback runs before the memory is
// actually freed, and the per-heap budget (if limited) is restored afterwards.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Informative callback for the user, while hMemory is still valid.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // If this heap has a size limit configured, give the freed bytes back.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    12955 
    12956 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    12957 {
    12958  if(hAllocation->CanBecomeLost())
    12959  {
    12960  return VK_ERROR_MEMORY_MAP_FAILED;
    12961  }
    12962 
    12963  switch(hAllocation->GetType())
    12964  {
    12965  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12966  {
    12967  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12968  char *pBytes = VMA_NULL;
    12969  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    12970  if(res == VK_SUCCESS)
    12971  {
    12972  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    12973  hAllocation->BlockAllocMap();
    12974  }
    12975  return res;
    12976  }
    12977  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12978  return hAllocation->DedicatedAllocMap(this, ppData);
    12979  default:
    12980  VMA_ASSERT(0);
    12981  return VK_ERROR_MEMORY_MAP_FAILED;
    12982  }
    12983 }
    12984 
    12985 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    12986 {
    12987  switch(hAllocation->GetType())
    12988  {
    12989  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12990  {
    12991  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    12992  hAllocation->BlockAllocUnmap();
    12993  pBlock->Unmap(this, 1);
    12994  }
    12995  break;
    12996  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12997  hAllocation->DedicatedAllocUnmap(this);
    12998  break;
    12999  default:
    13000  VMA_ASSERT(0);
    13001  }
    13002 }
    13003 
    13004 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13005 {
    13006  VkResult res = VK_SUCCESS;
    13007  switch(hAllocation->GetType())
    13008  {
    13009  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13010  res = GetVulkanFunctions().vkBindBufferMemory(
    13011  m_hDevice,
    13012  hBuffer,
    13013  hAllocation->GetMemory(),
    13014  0); //memoryOffset
    13015  break;
    13016  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13017  {
    13018  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13019  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13020  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13021  break;
    13022  }
    13023  default:
    13024  VMA_ASSERT(0);
    13025  }
    13026  return res;
    13027 }
    13028 
    13029 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13030 {
    13031  VkResult res = VK_SUCCESS;
    13032  switch(hAllocation->GetType())
    13033  {
    13034  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13035  res = GetVulkanFunctions().vkBindImageMemory(
    13036  m_hDevice,
    13037  hImage,
    13038  hAllocation->GetMemory(),
    13039  0); //memoryOffset
    13040  break;
    13041  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13042  {
    13043  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13044  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13045  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13046  break;
    13047  }
    13048  default:
    13049  VMA_ASSERT(0);
    13050  }
    13051  return res;
    13052 }
    13053 
// Flushes or invalidates the given [offset, offset+size) range of the
// allocation's memory, as required for non-coherent memory types.
// The range is expanded to nonCoherentAtomSize granularity as the Vulkan spec
// requires, and clamped so it never extends past the allocation's block.
// No-op for coherent memory types or when size == 0.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocation: offsets are relative to the whole
            // VkDeviceMemory, so only align within the allocation itself.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Align the end up, but never past the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // 1. Still within this allocation.
                memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
                if(size == VK_WHOLE_SIZE)
                {
                    size = allocationSize - offset;
                }
                else
                {
                    VMA_ASSERT(offset + size <= allocationSize);
                }
                memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

                // 2. Adjust to whole block.
                // The allocation's start inside the block is guaranteed to be
                // atom-aligned, so shifting by it preserves alignment.
                const VkDeviceSize allocationOffset = hAllocation->GetOffset();
                VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
                const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
                memRange.offset += allocationOffset;
                // Clamp so the aligned-up size never runs past the block end.
                memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

                break;
            }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13129 
    13130 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13131 {
    13132  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13133 
    13134  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13135  {
    13136  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13137  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13138  VMA_ASSERT(pDedicatedAllocations);
    13139  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13140  VMA_ASSERT(success);
    13141  }
    13142 
    13143  VkDeviceMemory hMemory = allocation->GetMemory();
    13144 
    13145  /*
    13146  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13147  before vkFreeMemory.
    13148 
    13149  if(allocation->GetMappedData() != VMA_NULL)
    13150  {
    13151  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13152  }
    13153  */
    13154 
    13155  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13156 
    13157  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13158 }
    13159 
    13160 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13161 {
    13162  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13163  !hAllocation->CanBecomeLost() &&
    13164  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13165  {
    13166  void* pData = VMA_NULL;
    13167  VkResult res = Map(hAllocation, &pData);
    13168  if(res == VK_SUCCESS)
    13169  {
    13170  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13171  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13172  Unmap(hAllocation);
    13173  }
    13174  else
    13175  {
    13176  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13177  }
    13178  }
    13179 }
    13180 
    13181 #if VMA_STATS_STRING_ENABLED
    13182 
// Writes a detailed JSON map of all memory into the given writer, in three
// sections: "DedicatedAllocations", "DefaultPools", and custom "Pools".
// Each section is emitted only if it has content. The exact emission order
// defines the output format consumed by external tooling - do not reorder.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // "DedicatedAllocations": { "Type N": [ {...}, ... ], ... }
    // The object is opened lazily on the first non-empty memory type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools": { "Type N": {...}, ... } - also opened lazily.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools, keyed by pool ID, under the pools mutex.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13268 
    13269 #endif // #if VMA_STATS_STRING_ENABLED
    13270 
    13272 // Public interface
    13273 
// Public entry point: creates the VmaAllocator object.
// NOTE(review): *pAllocator is assigned before Init(); if Init() fails, the
// (partially initialized) handle is still returned alongside the error code -
// callers are expected to destroy it.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13283 
// Public entry point: destroys the allocator. Safe to call with VK_NULL_HANDLE.
void vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        // Copy the callbacks out first: the allocator object that owns them is
        // about to be destroyed, and vma_delete still needs them to deallocate it.
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
        vma_delete(&allocationCallbacks, allocator);
    }
}
    13294 
    13296  VmaAllocator allocator,
    13297  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13298 {
    13299  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13300  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13301 }
    13302 
    13304  VmaAllocator allocator,
    13305  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13306 {
    13307  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13308  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13309 }
    13310 
    13312  VmaAllocator allocator,
    13313  uint32_t memoryTypeIndex,
    13314  VkMemoryPropertyFlags* pFlags)
    13315 {
    13316  VMA_ASSERT(allocator && pFlags);
    13317  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13318  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13319 }
    13320 
    13322  VmaAllocator allocator,
    13323  uint32_t frameIndex)
    13324 {
    13325  VMA_ASSERT(allocator);
    13326  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13327 
    13328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13329 
    13330  allocator->SetCurrentFrameIndex(frameIndex);
    13331 }
    13332 
// Public entry point: computes aggregate statistics for all memory in the
// allocator. Forwards to VmaAllocator_T::CalculateStats.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13341 
    13342 #if VMA_STATS_STRING_ENABLED
    13343 
    13344 void vmaBuildStatsString(
    13345  VmaAllocator allocator,
    13346  char** ppStatsString,
    13347  VkBool32 detailedMap)
    13348 {
    13349  VMA_ASSERT(allocator && ppStatsString);
    13350  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13351 
    13352  VmaStringBuilder sb(allocator);
    13353  {
    13354  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    13355  json.BeginObject();
    13356 
    13357  VmaStats stats;
    13358  allocator->CalculateStats(&stats);
    13359 
    13360  json.WriteString("Total");
    13361  VmaPrintStatInfo(json, stats.total);
    13362 
    13363  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    13364  {
    13365  json.BeginString("Heap ");
    13366  json.ContinueString(heapIndex);
    13367  json.EndString();
    13368  json.BeginObject();
    13369 
    13370  json.WriteString("Size");
    13371  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    13372 
    13373  json.WriteString("Flags");
    13374  json.BeginArray(true);
    13375  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    13376  {
    13377  json.WriteString("DEVICE_LOCAL");
    13378  }
    13379  json.EndArray();
    13380 
    13381  if(stats.memoryHeap[heapIndex].blockCount > 0)
    13382  {
    13383  json.WriteString("Stats");
    13384  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    13385  }
    13386 
    13387  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    13388  {
    13389  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    13390  {
    13391  json.BeginString("Type ");
    13392  json.ContinueString(typeIndex);
    13393  json.EndString();
    13394 
    13395  json.BeginObject();
    13396 
    13397  json.WriteString("Flags");
    13398  json.BeginArray(true);
    13399  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    13400  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    13401  {
    13402  json.WriteString("DEVICE_LOCAL");
    13403  }
    13404  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13405  {
    13406  json.WriteString("HOST_VISIBLE");
    13407  }
    13408  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    13409  {
    13410  json.WriteString("HOST_COHERENT");
    13411  }
    13412  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    13413  {
    13414  json.WriteString("HOST_CACHED");
    13415  }
    13416  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    13417  {
    13418  json.WriteString("LAZILY_ALLOCATED");
    13419  }
    13420  json.EndArray();
    13421 
    13422  if(stats.memoryType[typeIndex].blockCount > 0)
    13423  {
    13424  json.WriteString("Stats");
    13425  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    13426  }
    13427 
    13428  json.EndObject();
    13429  }
    13430  }
    13431 
    13432  json.EndObject();
    13433  }
    13434  if(detailedMap == VK_TRUE)
    13435  {
    13436  allocator->PrintDetailedMap(json);
    13437  }
    13438 
    13439  json.EndObject();
    13440  }
    13441 
    13442  const size_t len = sb.GetLength();
    13443  char* const pChars = vma_new_array(allocator, char, len + 1);
    13444  if(len > 0)
    13445  {
    13446  memcpy(pChars, sb.GetData(), len);
    13447  }
    13448  pChars[len] = '\0';
    13449  *ppStatsString = pChars;
    13450 }
    13451 
    13452 void vmaFreeStatsString(
    13453  VmaAllocator allocator,
    13454  char* pStatsString)
    13455 {
    13456  if(pStatsString != VMA_NULL)
    13457  {
    13458  VMA_ASSERT(allocator);
    13459  size_t len = strlen(pStatsString);
    13460  vma_delete_array(allocator, pStatsString, len + 1);
    13461  }
    13462 }
    13463 
    13464 #endif // #if VMA_STATS_STRING_ENABLED
    13465 
    13466 /*
    13467 This function is not protected by any mutex because it just reads immutable data.
    13468 */
    13469 VkResult vmaFindMemoryTypeIndex(
    13470  VmaAllocator allocator,
    13471  uint32_t memoryTypeBits,
    13472  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13473  uint32_t* pMemoryTypeIndex)
    13474 {
    13475  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13476  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13477  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13478 
    13479  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13480  {
    13481  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13482  }
    13483 
    13484  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13485  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13486 
    13487  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13488  if(mapped)
    13489  {
    13490  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13491  }
    13492 
    13493  // Convert usage to requiredFlags and preferredFlags.
    13494  switch(pAllocationCreateInfo->usage)
    13495  {
    13497  break;
    13499  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13500  {
    13501  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13502  }
    13503  break;
    13505  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13506  break;
    13508  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13509  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13510  {
    13511  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13512  }
    13513  break;
    13515  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13516  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13517  break;
    13518  default:
    13519  break;
    13520  }
    13521 
    13522  *pMemoryTypeIndex = UINT32_MAX;
    13523  uint32_t minCost = UINT32_MAX;
    13524  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13525  memTypeIndex < allocator->GetMemoryTypeCount();
    13526  ++memTypeIndex, memTypeBit <<= 1)
    13527  {
    13528  // This memory type is acceptable according to memoryTypeBits bitmask.
    13529  if((memTypeBit & memoryTypeBits) != 0)
    13530  {
    13531  const VkMemoryPropertyFlags currFlags =
    13532  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13533  // This memory type contains requiredFlags.
    13534  if((requiredFlags & ~currFlags) == 0)
    13535  {
    13536  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13537  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13538  // Remember memory type with lowest cost.
    13539  if(currCost < minCost)
    13540  {
    13541  *pMemoryTypeIndex = memTypeIndex;
    13542  if(currCost == 0)
    13543  {
    13544  return VK_SUCCESS;
    13545  }
    13546  minCost = currCost;
    13547  }
    13548  }
    13549  }
    13550  }
    13551  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13552 }
    13553 
    13555  VmaAllocator allocator,
    13556  const VkBufferCreateInfo* pBufferCreateInfo,
    13557  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13558  uint32_t* pMemoryTypeIndex)
    13559 {
    13560  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13561  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13562  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13563  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13564 
    13565  const VkDevice hDev = allocator->m_hDevice;
    13566  VkBuffer hBuffer = VK_NULL_HANDLE;
    13567  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13568  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13569  if(res == VK_SUCCESS)
    13570  {
    13571  VkMemoryRequirements memReq = {};
    13572  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13573  hDev, hBuffer, &memReq);
    13574 
    13575  res = vmaFindMemoryTypeIndex(
    13576  allocator,
    13577  memReq.memoryTypeBits,
    13578  pAllocationCreateInfo,
    13579  pMemoryTypeIndex);
    13580 
    13581  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13582  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13583  }
    13584  return res;
    13585 }
    13586 
    13588  VmaAllocator allocator,
    13589  const VkImageCreateInfo* pImageCreateInfo,
    13590  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13591  uint32_t* pMemoryTypeIndex)
    13592 {
    13593  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13594  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13595  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13596  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13597 
    13598  const VkDevice hDev = allocator->m_hDevice;
    13599  VkImage hImage = VK_NULL_HANDLE;
    13600  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13601  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13602  if(res == VK_SUCCESS)
    13603  {
    13604  VkMemoryRequirements memReq = {};
    13605  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13606  hDev, hImage, &memReq);
    13607 
    13608  res = vmaFindMemoryTypeIndex(
    13609  allocator,
    13610  memReq.memoryTypeBits,
    13611  pAllocationCreateInfo,
    13612  pMemoryTypeIndex);
    13613 
    13614  allocator->GetVulkanFunctions().vkDestroyImage(
    13615  hDev, hImage, allocator->GetAllocationCallbacks());
    13616  }
    13617  return res;
    13618 }
    13619 
    13620 VkResult vmaCreatePool(
    13621  VmaAllocator allocator,
    13622  const VmaPoolCreateInfo* pCreateInfo,
    13623  VmaPool* pPool)
    13624 {
    13625  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13626 
    13627  VMA_DEBUG_LOG("vmaCreatePool");
    13628 
    13629  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13630 
    13631  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13632 
    13633 #if VMA_RECORDING_ENABLED
    13634  if(allocator->GetRecorder() != VMA_NULL)
    13635  {
    13636  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13637  }
    13638 #endif
    13639 
    13640  return res;
    13641 }
    13642 
    13643 void vmaDestroyPool(
    13644  VmaAllocator allocator,
    13645  VmaPool pool)
    13646 {
    13647  VMA_ASSERT(allocator);
    13648 
    13649  if(pool == VK_NULL_HANDLE)
    13650  {
    13651  return;
    13652  }
    13653 
    13654  VMA_DEBUG_LOG("vmaDestroyPool");
    13655 
    13656  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13657 
    13658 #if VMA_RECORDING_ENABLED
    13659  if(allocator->GetRecorder() != VMA_NULL)
    13660  {
    13661  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13662  }
    13663 #endif
    13664 
    13665  allocator->DestroyPool(pool);
    13666 }
    13667 
    13668 void vmaGetPoolStats(
    13669  VmaAllocator allocator,
    13670  VmaPool pool,
    13671  VmaPoolStats* pPoolStats)
    13672 {
    13673  VMA_ASSERT(allocator && pool && pPoolStats);
    13674 
    13675  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13676 
    13677  allocator->GetPoolStats(pool, pPoolStats);
    13678 }
    13679 
    13681  VmaAllocator allocator,
    13682  VmaPool pool,
    13683  size_t* pLostAllocationCount)
    13684 {
    13685  VMA_ASSERT(allocator && pool);
    13686 
    13687  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13688 
    13689 #if VMA_RECORDING_ENABLED
    13690  if(allocator->GetRecorder() != VMA_NULL)
    13691  {
    13692  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13693  }
    13694 #endif
    13695 
    13696  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13697 }
    13698 
    13699 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13700 {
    13701  VMA_ASSERT(allocator && pool);
    13702 
    13703  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13704 
    13705  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13706 
    13707  return allocator->CheckPoolCorruption(pool);
    13708 }
    13709 
    13710 VkResult vmaAllocateMemory(
    13711  VmaAllocator allocator,
    13712  const VkMemoryRequirements* pVkMemoryRequirements,
    13713  const VmaAllocationCreateInfo* pCreateInfo,
    13714  VmaAllocation* pAllocation,
    13715  VmaAllocationInfo* pAllocationInfo)
    13716 {
    13717  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13718 
    13719  VMA_DEBUG_LOG("vmaAllocateMemory");
    13720 
    13721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13722 
    13723  VkResult result = allocator->AllocateMemory(
    13724  *pVkMemoryRequirements,
    13725  false, // requiresDedicatedAllocation
    13726  false, // prefersDedicatedAllocation
    13727  VK_NULL_HANDLE, // dedicatedBuffer
    13728  VK_NULL_HANDLE, // dedicatedImage
    13729  *pCreateInfo,
    13730  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13731  pAllocation);
    13732 
    13733 #if VMA_RECORDING_ENABLED
    13734  if(allocator->GetRecorder() != VMA_NULL)
    13735  {
    13736  allocator->GetRecorder()->RecordAllocateMemory(
    13737  allocator->GetCurrentFrameIndex(),
    13738  *pVkMemoryRequirements,
    13739  *pCreateInfo,
    13740  *pAllocation);
    13741  }
    13742 #endif
    13743 
    13744  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13745  {
    13746  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13747  }
    13748 
    13749  return result;
    13750 }
    13751 
    13753  VmaAllocator allocator,
    13754  VkBuffer buffer,
    13755  const VmaAllocationCreateInfo* pCreateInfo,
    13756  VmaAllocation* pAllocation,
    13757  VmaAllocationInfo* pAllocationInfo)
    13758 {
    13759  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13760 
    13761  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13762 
    13763  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13764 
    13765  VkMemoryRequirements vkMemReq = {};
    13766  bool requiresDedicatedAllocation = false;
    13767  bool prefersDedicatedAllocation = false;
    13768  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    13769  requiresDedicatedAllocation,
    13770  prefersDedicatedAllocation);
    13771 
    13772  VkResult result = allocator->AllocateMemory(
    13773  vkMemReq,
    13774  requiresDedicatedAllocation,
    13775  prefersDedicatedAllocation,
    13776  buffer, // dedicatedBuffer
    13777  VK_NULL_HANDLE, // dedicatedImage
    13778  *pCreateInfo,
    13779  VMA_SUBALLOCATION_TYPE_BUFFER,
    13780  pAllocation);
    13781 
    13782 #if VMA_RECORDING_ENABLED
    13783  if(allocator->GetRecorder() != VMA_NULL)
    13784  {
    13785  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    13786  allocator->GetCurrentFrameIndex(),
    13787  vkMemReq,
    13788  requiresDedicatedAllocation,
    13789  prefersDedicatedAllocation,
    13790  *pCreateInfo,
    13791  *pAllocation);
    13792  }
    13793 #endif
    13794 
    13795  if(pAllocationInfo && result == VK_SUCCESS)
    13796  {
    13797  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13798  }
    13799 
    13800  return result;
    13801 }
    13802 
    13803 VkResult vmaAllocateMemoryForImage(
    13804  VmaAllocator allocator,
    13805  VkImage image,
    13806  const VmaAllocationCreateInfo* pCreateInfo,
    13807  VmaAllocation* pAllocation,
    13808  VmaAllocationInfo* pAllocationInfo)
    13809 {
    13810  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13811 
    13812  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    13813 
    13814  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13815 
    13816  VkMemoryRequirements vkMemReq = {};
    13817  bool requiresDedicatedAllocation = false;
    13818  bool prefersDedicatedAllocation = false;
    13819  allocator->GetImageMemoryRequirements(image, vkMemReq,
    13820  requiresDedicatedAllocation, prefersDedicatedAllocation);
    13821 
    13822  VkResult result = allocator->AllocateMemory(
    13823  vkMemReq,
    13824  requiresDedicatedAllocation,
    13825  prefersDedicatedAllocation,
    13826  VK_NULL_HANDLE, // dedicatedBuffer
    13827  image, // dedicatedImage
    13828  *pCreateInfo,
    13829  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    13830  pAllocation);
    13831 
    13832 #if VMA_RECORDING_ENABLED
    13833  if(allocator->GetRecorder() != VMA_NULL)
    13834  {
    13835  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    13836  allocator->GetCurrentFrameIndex(),
    13837  vkMemReq,
    13838  requiresDedicatedAllocation,
    13839  prefersDedicatedAllocation,
    13840  *pCreateInfo,
    13841  *pAllocation);
    13842  }
    13843 #endif
    13844 
    13845  if(pAllocationInfo && result == VK_SUCCESS)
    13846  {
    13847  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13848  }
    13849 
    13850  return result;
    13851 }
    13852 
    13853 void vmaFreeMemory(
    13854  VmaAllocator allocator,
    13855  VmaAllocation allocation)
    13856 {
    13857  VMA_ASSERT(allocator);
    13858 
    13859  if(allocation == VK_NULL_HANDLE)
    13860  {
    13861  return;
    13862  }
    13863 
    13864  VMA_DEBUG_LOG("vmaFreeMemory");
    13865 
    13866  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13867 
    13868 #if VMA_RECORDING_ENABLED
    13869  if(allocator->GetRecorder() != VMA_NULL)
    13870  {
    13871  allocator->GetRecorder()->RecordFreeMemory(
    13872  allocator->GetCurrentFrameIndex(),
    13873  allocation);
    13874  }
    13875 #endif
    13876 
    13877  allocator->FreeMemory(allocation);
    13878 }
    13879 
    13881  VmaAllocator allocator,
    13882  VmaAllocation allocation,
    13883  VmaAllocationInfo* pAllocationInfo)
    13884 {
    13885  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    13886 
    13887  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13888 
    13889 #if VMA_RECORDING_ENABLED
    13890  if(allocator->GetRecorder() != VMA_NULL)
    13891  {
    13892  allocator->GetRecorder()->RecordGetAllocationInfo(
    13893  allocator->GetCurrentFrameIndex(),
    13894  allocation);
    13895  }
    13896 #endif
    13897 
    13898  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    13899 }
    13900 
    13901 VkBool32 vmaTouchAllocation(
    13902  VmaAllocator allocator,
    13903  VmaAllocation allocation)
    13904 {
    13905  VMA_ASSERT(allocator && allocation);
    13906 
    13907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13908 
    13909 #if VMA_RECORDING_ENABLED
    13910  if(allocator->GetRecorder() != VMA_NULL)
    13911  {
    13912  allocator->GetRecorder()->RecordTouchAllocation(
    13913  allocator->GetCurrentFrameIndex(),
    13914  allocation);
    13915  }
    13916 #endif
    13917 
    13918  return allocator->TouchAllocation(allocation);
    13919 }
    13920 
    13922  VmaAllocator allocator,
    13923  VmaAllocation allocation,
    13924  void* pUserData)
    13925 {
    13926  VMA_ASSERT(allocator && allocation);
    13927 
    13928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13929 
    13930  allocation->SetUserData(allocator, pUserData);
    13931 
    13932 #if VMA_RECORDING_ENABLED
    13933  if(allocator->GetRecorder() != VMA_NULL)
    13934  {
    13935  allocator->GetRecorder()->RecordSetAllocationUserData(
    13936  allocator->GetCurrentFrameIndex(),
    13937  allocation,
    13938  pUserData);
    13939  }
    13940 #endif
    13941 }
    13942 
    13944  VmaAllocator allocator,
    13945  VmaAllocation* pAllocation)
    13946 {
    13947  VMA_ASSERT(allocator && pAllocation);
    13948 
    13949  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    13950 
    13951  allocator->CreateLostAllocation(pAllocation);
    13952 
    13953 #if VMA_RECORDING_ENABLED
    13954  if(allocator->GetRecorder() != VMA_NULL)
    13955  {
    13956  allocator->GetRecorder()->RecordCreateLostAllocation(
    13957  allocator->GetCurrentFrameIndex(),
    13958  *pAllocation);
    13959  }
    13960 #endif
    13961 }
    13962 
    13963 VkResult vmaMapMemory(
    13964  VmaAllocator allocator,
    13965  VmaAllocation allocation,
    13966  void** ppData)
    13967 {
    13968  VMA_ASSERT(allocator && allocation && ppData);
    13969 
    13970  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13971 
    13972  VkResult res = allocator->Map(allocation, ppData);
    13973 
    13974 #if VMA_RECORDING_ENABLED
    13975  if(allocator->GetRecorder() != VMA_NULL)
    13976  {
    13977  allocator->GetRecorder()->RecordMapMemory(
    13978  allocator->GetCurrentFrameIndex(),
    13979  allocation);
    13980  }
    13981 #endif
    13982 
    13983  return res;
    13984 }
    13985 
    13986 void vmaUnmapMemory(
    13987  VmaAllocator allocator,
    13988  VmaAllocation allocation)
    13989 {
    13990  VMA_ASSERT(allocator && allocation);
    13991 
    13992  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13993 
    13994 #if VMA_RECORDING_ENABLED
    13995  if(allocator->GetRecorder() != VMA_NULL)
    13996  {
    13997  allocator->GetRecorder()->RecordUnmapMemory(
    13998  allocator->GetCurrentFrameIndex(),
    13999  allocation);
    14000  }
    14001 #endif
    14002 
    14003  allocator->Unmap(allocation);
    14004 }
    14005 
    14006 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14007 {
    14008  VMA_ASSERT(allocator && allocation);
    14009 
    14010  VMA_DEBUG_LOG("vmaFlushAllocation");
    14011 
    14012  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14013 
    14014  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14015 
    14016 #if VMA_RECORDING_ENABLED
    14017  if(allocator->GetRecorder() != VMA_NULL)
    14018  {
    14019  allocator->GetRecorder()->RecordFlushAllocation(
    14020  allocator->GetCurrentFrameIndex(),
    14021  allocation, offset, size);
    14022  }
    14023 #endif
    14024 }
    14025 
    14026 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14027 {
    14028  VMA_ASSERT(allocator && allocation);
    14029 
    14030  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14031 
    14032  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14033 
    14034  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14035 
    14036 #if VMA_RECORDING_ENABLED
    14037  if(allocator->GetRecorder() != VMA_NULL)
    14038  {
    14039  allocator->GetRecorder()->RecordInvalidateAllocation(
    14040  allocator->GetCurrentFrameIndex(),
    14041  allocation, offset, size);
    14042  }
    14043 #endif
    14044 }
    14045 
    14046 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14047 {
    14048  VMA_ASSERT(allocator);
    14049 
    14050  VMA_DEBUG_LOG("vmaCheckCorruption");
    14051 
    14052  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14053 
    14054  return allocator->CheckCorruption(memoryTypeBits);
    14055 }
    14056 
    14057 VkResult vmaDefragment(
    14058  VmaAllocator allocator,
    14059  VmaAllocation* pAllocations,
    14060  size_t allocationCount,
    14061  VkBool32* pAllocationsChanged,
    14062  const VmaDefragmentationInfo *pDefragmentationInfo,
    14063  VmaDefragmentationStats* pDefragmentationStats)
    14064 {
    14065  VMA_ASSERT(allocator && pAllocations);
    14066 
    14067  VMA_DEBUG_LOG("vmaDefragment");
    14068 
    14069  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14070 
    14071  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14072 }
    14073 
    14074 VkResult vmaBindBufferMemory(
    14075  VmaAllocator allocator,
    14076  VmaAllocation allocation,
    14077  VkBuffer buffer)
    14078 {
    14079  VMA_ASSERT(allocator && allocation && buffer);
    14080 
    14081  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14082 
    14083  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14084 
    14085  return allocator->BindBufferMemory(allocation, buffer);
    14086 }
    14087 
    14088 VkResult vmaBindImageMemory(
    14089  VmaAllocator allocator,
    14090  VmaAllocation allocation,
    14091  VkImage image)
    14092 {
    14093  VMA_ASSERT(allocator && allocation && image);
    14094 
    14095  VMA_DEBUG_LOG("vmaBindImageMemory");
    14096 
    14097  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14098 
    14099  return allocator->BindImageMemory(allocation, image);
    14100 }
    14101 
    14102 VkResult vmaCreateBuffer(
    14103  VmaAllocator allocator,
    14104  const VkBufferCreateInfo* pBufferCreateInfo,
    14105  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14106  VkBuffer* pBuffer,
    14107  VmaAllocation* pAllocation,
    14108  VmaAllocationInfo* pAllocationInfo)
    14109 {
    14110  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14111 
    14112  if(pBufferCreateInfo->size == 0)
    14113  {
    14114  return VK_ERROR_VALIDATION_FAILED_EXT;
    14115  }
    14116 
    14117  VMA_DEBUG_LOG("vmaCreateBuffer");
    14118 
    14119  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14120 
    14121  *pBuffer = VK_NULL_HANDLE;
    14122  *pAllocation = VK_NULL_HANDLE;
    14123 
    14124  // 1. Create VkBuffer.
    14125  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14126  allocator->m_hDevice,
    14127  pBufferCreateInfo,
    14128  allocator->GetAllocationCallbacks(),
    14129  pBuffer);
    14130  if(res >= 0)
    14131  {
    14132  // 2. vkGetBufferMemoryRequirements.
    14133  VkMemoryRequirements vkMemReq = {};
    14134  bool requiresDedicatedAllocation = false;
    14135  bool prefersDedicatedAllocation = false;
    14136  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14137  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14138 
    14139  // Make sure alignment requirements for specific buffer usages reported
    14140  // in Physical Device Properties are included in alignment reported by memory requirements.
    14141  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14142  {
    14143  VMA_ASSERT(vkMemReq.alignment %
    14144  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14145  }
    14146  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14147  {
    14148  VMA_ASSERT(vkMemReq.alignment %
    14149  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14150  }
    14151  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14152  {
    14153  VMA_ASSERT(vkMemReq.alignment %
    14154  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14155  }
    14156 
    14157  // 3. Allocate memory using allocator.
    14158  res = allocator->AllocateMemory(
    14159  vkMemReq,
    14160  requiresDedicatedAllocation,
    14161  prefersDedicatedAllocation,
    14162  *pBuffer, // dedicatedBuffer
    14163  VK_NULL_HANDLE, // dedicatedImage
    14164  *pAllocationCreateInfo,
    14165  VMA_SUBALLOCATION_TYPE_BUFFER,
    14166  pAllocation);
    14167 
    14168 #if VMA_RECORDING_ENABLED
    14169  if(allocator->GetRecorder() != VMA_NULL)
    14170  {
    14171  allocator->GetRecorder()->RecordCreateBuffer(
    14172  allocator->GetCurrentFrameIndex(),
    14173  *pBufferCreateInfo,
    14174  *pAllocationCreateInfo,
    14175  *pAllocation);
    14176  }
    14177 #endif
    14178 
    14179  if(res >= 0)
    14180  {
    14181  // 3. Bind buffer with memory.
    14182  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14183  if(res >= 0)
    14184  {
    14185  // All steps succeeded.
    14186  #if VMA_STATS_STRING_ENABLED
    14187  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14188  #endif
    14189  if(pAllocationInfo != VMA_NULL)
    14190  {
    14191  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14192  }
    14193 
    14194  return VK_SUCCESS;
    14195  }
    14196  allocator->FreeMemory(*pAllocation);
    14197  *pAllocation = VK_NULL_HANDLE;
    14198  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14199  *pBuffer = VK_NULL_HANDLE;
    14200  return res;
    14201  }
    14202  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14203  *pBuffer = VK_NULL_HANDLE;
    14204  return res;
    14205  }
    14206  return res;
    14207 }
    14208 
    14209 void vmaDestroyBuffer(
    14210  VmaAllocator allocator,
    14211  VkBuffer buffer,
    14212  VmaAllocation allocation)
    14213 {
    14214  VMA_ASSERT(allocator);
    14215 
    14216  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14217  {
    14218  return;
    14219  }
    14220 
    14221  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14222 
    14223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14224 
    14225 #if VMA_RECORDING_ENABLED
    14226  if(allocator->GetRecorder() != VMA_NULL)
    14227  {
    14228  allocator->GetRecorder()->RecordDestroyBuffer(
    14229  allocator->GetCurrentFrameIndex(),
    14230  allocation);
    14231  }
    14232 #endif
    14233 
    14234  if(buffer != VK_NULL_HANDLE)
    14235  {
    14236  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14237  }
    14238 
    14239  if(allocation != VK_NULL_HANDLE)
    14240  {
    14241  allocator->FreeMemory(allocation);
    14242  }
    14243 }
    14244 
    14245 VkResult vmaCreateImage(
    14246  VmaAllocator allocator,
    14247  const VkImageCreateInfo* pImageCreateInfo,
    14248  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14249  VkImage* pImage,
    14250  VmaAllocation* pAllocation,
    14251  VmaAllocationInfo* pAllocationInfo)
    14252 {
    14253  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14254 
    14255  if(pImageCreateInfo->extent.width == 0 ||
    14256  pImageCreateInfo->extent.height == 0 ||
    14257  pImageCreateInfo->extent.depth == 0 ||
    14258  pImageCreateInfo->mipLevels == 0 ||
    14259  pImageCreateInfo->arrayLayers == 0)
    14260  {
    14261  return VK_ERROR_VALIDATION_FAILED_EXT;
    14262  }
    14263 
    14264  VMA_DEBUG_LOG("vmaCreateImage");
    14265 
    14266  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14267 
    14268  *pImage = VK_NULL_HANDLE;
    14269  *pAllocation = VK_NULL_HANDLE;
    14270 
    14271  // 1. Create VkImage.
    14272  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14273  allocator->m_hDevice,
    14274  pImageCreateInfo,
    14275  allocator->GetAllocationCallbacks(),
    14276  pImage);
    14277  if(res >= 0)
    14278  {
    14279  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14280  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14281  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14282 
    14283  // 2. Allocate memory using allocator.
    14284  VkMemoryRequirements vkMemReq = {};
    14285  bool requiresDedicatedAllocation = false;
    14286  bool prefersDedicatedAllocation = false;
    14287  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14288  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14289 
    14290  res = allocator->AllocateMemory(
    14291  vkMemReq,
    14292  requiresDedicatedAllocation,
    14293  prefersDedicatedAllocation,
    14294  VK_NULL_HANDLE, // dedicatedBuffer
    14295  *pImage, // dedicatedImage
    14296  *pAllocationCreateInfo,
    14297  suballocType,
    14298  pAllocation);
    14299 
    14300 #if VMA_RECORDING_ENABLED
    14301  if(allocator->GetRecorder() != VMA_NULL)
    14302  {
    14303  allocator->GetRecorder()->RecordCreateImage(
    14304  allocator->GetCurrentFrameIndex(),
    14305  *pImageCreateInfo,
    14306  *pAllocationCreateInfo,
    14307  *pAllocation);
    14308  }
    14309 #endif
    14310 
    14311  if(res >= 0)
    14312  {
    14313  // 3. Bind image with memory.
    14314  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14315  if(res >= 0)
    14316  {
    14317  // All steps succeeded.
    14318  #if VMA_STATS_STRING_ENABLED
    14319  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14320  #endif
    14321  if(pAllocationInfo != VMA_NULL)
    14322  {
    14323  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14324  }
    14325 
    14326  return VK_SUCCESS;
    14327  }
    14328  allocator->FreeMemory(*pAllocation);
    14329  *pAllocation = VK_NULL_HANDLE;
    14330  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14331  *pImage = VK_NULL_HANDLE;
    14332  return res;
    14333  }
    14334  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14335  *pImage = VK_NULL_HANDLE;
    14336  return res;
    14337  }
    14338  return res;
    14339 }
    14340 
    14341 void vmaDestroyImage(
    14342  VmaAllocator allocator,
    14343  VkImage image,
    14344  VmaAllocation allocation)
    14345 {
    14346  VMA_ASSERT(allocator);
    14347 
    14348  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14349  {
    14350  return;
    14351  }
    14352 
    14353  VMA_DEBUG_LOG("vmaDestroyImage");
    14354 
    14355  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14356 
    14357 #if VMA_RECORDING_ENABLED
    14358  if(allocator->GetRecorder() != VMA_NULL)
    14359  {
    14360  allocator->GetRecorder()->RecordDestroyImage(
    14361  allocator->GetCurrentFrameIndex(),
    14362  allocation);
    14363  }
    14364 #endif
    14365 
    14366  if(image != VK_NULL_HANDLE)
    14367  {
    14368  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14369  }
    14370  if(allocation != VK_NULL_HANDLE)
    14371  {
    14372  allocator->FreeMemory(allocation);
    14373  }
    14374 }
    14375 
    14376 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1584
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1477 /*
    1478 Define this macro to 0/1 to disable/enable support for recording functionality,
    1479 available through VmaAllocatorCreateInfo::pRecordSettings.
    1480 */
    1481 #ifndef VMA_RECORDING_ENABLED
    1482  #ifdef _WIN32
    1483  #define VMA_RECORDING_ENABLED 1
    1484  #else
    1485  #define VMA_RECORDING_ENABLED 0
    1486  #endif
    1487 #endif
    1488 
    1489 #ifndef NOMINMAX
    1490  #define NOMINMAX // For windows.h
    1491 #endif
    1492 
    1493 #include <vulkan/vulkan.h>
    1494 
    1495 #if VMA_RECORDING_ENABLED
    1496  #include <windows.h>
    1497 #endif
    1498 
    1499 #if !defined(VMA_DEDICATED_ALLOCATION)
    1500  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1501  #define VMA_DEDICATED_ALLOCATION 1
    1502  #else
    1503  #define VMA_DEDICATED_ALLOCATION 0
    1504  #endif
    1505 #endif
    1506 
    1516 VK_DEFINE_HANDLE(VmaAllocator)
    1517 
    1518 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1520  VmaAllocator allocator,
    1521  uint32_t memoryType,
    1522  VkDeviceMemory memory,
    1523  VkDeviceSize size);
    1525 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1526  VmaAllocator allocator,
    1527  uint32_t memoryType,
    1528  VkDeviceMemory memory,
    1529  VkDeviceSize size);
    1530 
    1544 
    1574 
    1577 typedef VkFlags VmaAllocatorCreateFlags;
    1578 
    1583 typedef struct VmaVulkanFunctions {
    1584  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1585  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1586  PFN_vkAllocateMemory vkAllocateMemory;
    1587  PFN_vkFreeMemory vkFreeMemory;
    1588  PFN_vkMapMemory vkMapMemory;
    1589  PFN_vkUnmapMemory vkUnmapMemory;
    1590  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1591  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1592  PFN_vkBindBufferMemory vkBindBufferMemory;
    1593  PFN_vkBindImageMemory vkBindImageMemory;
    1594  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1595  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1596  PFN_vkCreateBuffer vkCreateBuffer;
    1597  PFN_vkDestroyBuffer vkDestroyBuffer;
    1598  PFN_vkCreateImage vkCreateImage;
    1599  PFN_vkDestroyImage vkDestroyImage;
    1600 #if VMA_DEDICATED_ALLOCATION
    1601  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1602  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1603 #endif
    1605 
    1607 typedef enum VmaRecordFlagBits {
    1614 
    1617 typedef VkFlags VmaRecordFlags;
    1618 
    1620 typedef struct VmaRecordSettings
    1621 {
    1631  const char* pFilePath;
    1633 
    1636 {
    1640 
    1641  VkPhysicalDevice physicalDevice;
    1643 
    1644  VkDevice device;
    1646 
    1649 
    1650  const VkAllocationCallbacks* pAllocationCallbacks;
    1652 
    1691  const VkDeviceSize* pHeapSizeLimit;
    1712 
    1714 VkResult vmaCreateAllocator(
    1715  const VmaAllocatorCreateInfo* pCreateInfo,
    1716  VmaAllocator* pAllocator);
    1717 
    1719 void vmaDestroyAllocator(
    1720  VmaAllocator allocator);
    1721 
    1727  VmaAllocator allocator,
    1728  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1729 
    1735  VmaAllocator allocator,
    1736  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1737 
    1745  VmaAllocator allocator,
    1746  uint32_t memoryTypeIndex,
    1747  VkMemoryPropertyFlags* pFlags);
    1748 
    1758  VmaAllocator allocator,
    1759  uint32_t frameIndex);
    1760 
    1763 typedef struct VmaStatInfo
    1764 {
    1766  uint32_t blockCount;
    1772  VkDeviceSize usedBytes;
    1774  VkDeviceSize unusedBytes;
    1777 } VmaStatInfo;
    1778 
    1780 typedef struct VmaStats
    1781 {
    1782  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1783  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1785 } VmaStats;
    1786 
    1788 void vmaCalculateStats(
    1789  VmaAllocator allocator,
    1790  VmaStats* pStats);
    1791 
    1792 #define VMA_STATS_STRING_ENABLED 1
    1793 
    1794 #if VMA_STATS_STRING_ENABLED
    1795 
    1797 
    1799 void vmaBuildStatsString(
    1800  VmaAllocator allocator,
    1801  char** ppStatsString,
    1802  VkBool32 detailedMap);
    1803 
    1804 void vmaFreeStatsString(
    1805  VmaAllocator allocator,
    1806  char* pStatsString);
    1807 
    1808 #endif // #if VMA_STATS_STRING_ENABLED
    1809 
    1818 VK_DEFINE_HANDLE(VmaPool)
    1819 
    1820 typedef enum VmaMemoryUsage
    1821 {
    1870 } VmaMemoryUsage;
    1871 
    1886 
    1941 
    1954 
    1964 
    1971 
    1975 
    1977 {
    1990  VkMemoryPropertyFlags requiredFlags;
    1995  VkMemoryPropertyFlags preferredFlags;
    2003  uint32_t memoryTypeBits;
    2016  void* pUserData;
    2018 
    2035 VkResult vmaFindMemoryTypeIndex(
    2036  VmaAllocator allocator,
    2037  uint32_t memoryTypeBits,
    2038  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2039  uint32_t* pMemoryTypeIndex);
    2040 
    2054  VmaAllocator allocator,
    2055  const VkBufferCreateInfo* pBufferCreateInfo,
    2056  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2057  uint32_t* pMemoryTypeIndex);
    2058 
    2072  VmaAllocator allocator,
    2073  const VkImageCreateInfo* pImageCreateInfo,
    2074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2075  uint32_t* pMemoryTypeIndex);
    2076 
    2097 
    2114 
    2125 
    2131 
    2134 typedef VkFlags VmaPoolCreateFlags;
    2135 
    2138 typedef struct VmaPoolCreateInfo {
    2153  VkDeviceSize blockSize;
    2182 
    2185 typedef struct VmaPoolStats {
    2188  VkDeviceSize size;
    2191  VkDeviceSize unusedSize;
    2204  VkDeviceSize unusedRangeSizeMax;
    2207  size_t blockCount;
    2208 } VmaPoolStats;
    2209 
    2216 VkResult vmaCreatePool(
    2217  VmaAllocator allocator,
    2218  const VmaPoolCreateInfo* pCreateInfo,
    2219  VmaPool* pPool);
    2220 
    2223 void vmaDestroyPool(
    2224  VmaAllocator allocator,
    2225  VmaPool pool);
    2226 
    2233 void vmaGetPoolStats(
    2234  VmaAllocator allocator,
    2235  VmaPool pool,
    2236  VmaPoolStats* pPoolStats);
    2237 
    2245  VmaAllocator allocator,
    2246  VmaPool pool,
    2247  size_t* pLostAllocationCount);
    2248 
    2263 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2264 
    2289 VK_DEFINE_HANDLE(VmaAllocation)
    2290 
    2291 
    2293 typedef struct VmaAllocationInfo {
    2298  uint32_t memoryType;
    2307  VkDeviceMemory deviceMemory;
    2312  VkDeviceSize offset;
    2317  VkDeviceSize size;
    2331  void* pUserData;
    2333 
    2344 VkResult vmaAllocateMemory(
    2345  VmaAllocator allocator,
    2346  const VkMemoryRequirements* pVkMemoryRequirements,
    2347  const VmaAllocationCreateInfo* pCreateInfo,
    2348  VmaAllocation* pAllocation,
    2349  VmaAllocationInfo* pAllocationInfo);
    2350 
    2358  VmaAllocator allocator,
    2359  VkBuffer buffer,
    2360  const VmaAllocationCreateInfo* pCreateInfo,
    2361  VmaAllocation* pAllocation,
    2362  VmaAllocationInfo* pAllocationInfo);
    2363 
    2365 VkResult vmaAllocateMemoryForImage(
    2366  VmaAllocator allocator,
    2367  VkImage image,
    2368  const VmaAllocationCreateInfo* pCreateInfo,
    2369  VmaAllocation* pAllocation,
    2370  VmaAllocationInfo* pAllocationInfo);
    2371 
    2373 void vmaFreeMemory(
    2374  VmaAllocator allocator,
    2375  VmaAllocation allocation);
    2376 
    2397 VkResult vmaResizeAllocation(
    2398  VmaAllocator allocator,
    2399  VmaAllocation allocation,
    2400  VkDeviceSize newSize);
    2401 
    2419  VmaAllocator allocator,
    2420  VmaAllocation allocation,
    2421  VmaAllocationInfo* pAllocationInfo);
    2422 
    2437 VkBool32 vmaTouchAllocation(
    2438  VmaAllocator allocator,
    2439  VmaAllocation allocation);
    2440 
    2455  VmaAllocator allocator,
    2456  VmaAllocation allocation,
    2457  void* pUserData);
    2458 
    2470  VmaAllocator allocator,
    2471  VmaAllocation* pAllocation);
    2472 
    2507 VkResult vmaMapMemory(
    2508  VmaAllocator allocator,
    2509  VmaAllocation allocation,
    2510  void** ppData);
    2511 
    2516 void vmaUnmapMemory(
    2517  VmaAllocator allocator,
    2518  VmaAllocation allocation);
    2519 
    2532 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2533 
    2546 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2547 
    2564 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2565 
    2567 typedef struct VmaDefragmentationInfo {
    2572  VkDeviceSize maxBytesToMove;
    2579 
    2581 typedef struct VmaDefragmentationStats {
    2583  VkDeviceSize bytesMoved;
    2585  VkDeviceSize bytesFreed;
    2591 
    2630 VkResult vmaDefragment(
    2631  VmaAllocator allocator,
    2632  VmaAllocation* pAllocations,
    2633  size_t allocationCount,
    2634  VkBool32* pAllocationsChanged,
    2635  const VmaDefragmentationInfo *pDefragmentationInfo,
    2636  VmaDefragmentationStats* pDefragmentationStats);
    2637 
    2650 VkResult vmaBindBufferMemory(
    2651  VmaAllocator allocator,
    2652  VmaAllocation allocation,
    2653  VkBuffer buffer);
    2654 
    2667 VkResult vmaBindImageMemory(
    2668  VmaAllocator allocator,
    2669  VmaAllocation allocation,
    2670  VkImage image);
    2671 
    2698 VkResult vmaCreateBuffer(
    2699  VmaAllocator allocator,
    2700  const VkBufferCreateInfo* pBufferCreateInfo,
    2701  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2702  VkBuffer* pBuffer,
    2703  VmaAllocation* pAllocation,
    2704  VmaAllocationInfo* pAllocationInfo);
    2705 
    2717 void vmaDestroyBuffer(
    2718  VmaAllocator allocator,
    2719  VkBuffer buffer,
    2720  VmaAllocation allocation);
    2721 
    2723 VkResult vmaCreateImage(
    2724  VmaAllocator allocator,
    2725  const VkImageCreateInfo* pImageCreateInfo,
    2726  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2727  VkImage* pImage,
    2728  VmaAllocation* pAllocation,
    2729  VmaAllocationInfo* pAllocationInfo);
    2730 
    2742 void vmaDestroyImage(
    2743  VmaAllocator allocator,
    2744  VkImage image,
    2745  VmaAllocation allocation);
    2746 
    2747 #ifdef __cplusplus
    2748 }
    2749 #endif
    2750 
    2751 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2752 
    2753 // For Visual Studio IntelliSense.
    2754 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2755 #define VMA_IMPLEMENTATION
    2756 #endif
    2757 
    2758 #ifdef VMA_IMPLEMENTATION
    2759 #undef VMA_IMPLEMENTATION
    2760 
    2761 #include <cstdint>
    2762 #include <cstdlib>
    2763 #include <cstring>
    2764 
    2765 /*******************************************************************************
    2766 CONFIGURATION SECTION
    2767 
    2768 Define some of these macros before each #include of this header or change them
    2769 here if you need other then default behavior depending on your environment.
    2770 */
    2771 
    2772 /*
    2773 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2774 internally, like:
    2775 
    2776  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2777 
    2778 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2779 VmaAllocatorCreateInfo::pVulkanFunctions.
    2780 */
    2781 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2782 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2783 #endif
    2784 
    2785 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2786 //#define VMA_USE_STL_CONTAINERS 1
    2787 
    2788 /* Set this macro to 1 to make the library including and using STL containers:
    2789 std::pair, std::vector, std::list, std::unordered_map.
    2790 
    2791 Set it to 0 or undefined to make the library using its own implementation of
    2792 the containers.
    2793 */
    2794 #if VMA_USE_STL_CONTAINERS
    2795  #define VMA_USE_STL_VECTOR 1
    2796  #define VMA_USE_STL_UNORDERED_MAP 1
    2797  #define VMA_USE_STL_LIST 1
    2798 #endif
    2799 
    2800 #if VMA_USE_STL_VECTOR
    2801  #include <vector>
    2802 #endif
    2803 
    2804 #if VMA_USE_STL_UNORDERED_MAP
    2805  #include <unordered_map>
    2806 #endif
    2807 
    2808 #if VMA_USE_STL_LIST
    2809  #include <list>
    2810 #endif
    2811 
    2812 /*
    2813 Following headers are used in this CONFIGURATION section only, so feel free to
    2814 remove them if not needed.
    2815 */
    2816 #include <cassert> // for assert
    2817 #include <algorithm> // for min, max
    2818 #include <mutex> // for std::mutex
    2819 #include <atomic> // for std::atomic
    2820 
    2821 #ifndef VMA_NULL
    2822  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2823  #define VMA_NULL nullptr
    2824 #endif
    2825 
    2826 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2827 #include <cstdlib>
    2828 void *aligned_alloc(size_t alignment, size_t size)
    2829 {
    2830  // alignment must be >= sizeof(void*)
    2831  if(alignment < sizeof(void*))
    2832  {
    2833  alignment = sizeof(void*);
    2834  }
    2835 
    2836  return memalign(alignment, size);
    2837 }
    2838 #elif defined(__APPLE__) || defined(__ANDROID__)
    2839 #include <cstdlib>
    2840 void *aligned_alloc(size_t alignment, size_t size)
    2841 {
    2842  // alignment must be >= sizeof(void*)
    2843  if(alignment < sizeof(void*))
    2844  {
    2845  alignment = sizeof(void*);
    2846  }
    2847 
    2848  void *pointer;
    2849  if(posix_memalign(&pointer, alignment, size) == 0)
    2850  return pointer;
    2851  return VMA_NULL;
    2852 }
    2853 #endif
    2854 
    2855 // If your compiler is not compatible with C++11 and definition of
    2856 // aligned_alloc() function is missing, uncommeting following line may help:
    2857 
    2858 //#include <malloc.h>
    2859 
    2860 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2861 #ifndef VMA_ASSERT
    2862  #ifdef _DEBUG
    2863  #define VMA_ASSERT(expr) assert(expr)
    2864  #else
    2865  #define VMA_ASSERT(expr)
    2866  #endif
    2867 #endif
    2868 
    2869 // Assert that will be called very often, like inside data structures e.g. operator[].
    2870 // Making it non-empty can make program slow.
    2871 #ifndef VMA_HEAVY_ASSERT
    2872  #ifdef _DEBUG
    2873  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2874  #else
    2875  #define VMA_HEAVY_ASSERT(expr)
    2876  #endif
    2877 #endif
    2878 
    2879 #ifndef VMA_ALIGN_OF
    2880  #define VMA_ALIGN_OF(type) (__alignof(type))
    2881 #endif
    2882 
    2883 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2884  #if defined(_WIN32)
    2885  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2886  #else
    2887  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2888  #endif
    2889 #endif
    2890 
    2891 #ifndef VMA_SYSTEM_FREE
    2892  #if defined(_WIN32)
    2893  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2894  #else
    2895  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2896  #endif
    2897 #endif
    2898 
    2899 #ifndef VMA_MIN
    2900  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2901 #endif
    2902 
    2903 #ifndef VMA_MAX
    2904  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2905 #endif
    2906 
    2907 #ifndef VMA_SWAP
    2908  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2909 #endif
    2910 
    2911 #ifndef VMA_SORT
    2912  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2913 #endif
    2914 
    2915 #ifndef VMA_DEBUG_LOG
    2916  #define VMA_DEBUG_LOG(format, ...)
    2917  /*
    2918  #define VMA_DEBUG_LOG(format, ...) do { \
    2919  printf(format, __VA_ARGS__); \
    2920  printf("\n"); \
    2921  } while(false)
    2922  */
    2923 #endif
    2924 
    2925 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2926 #if VMA_STATS_STRING_ENABLED
    2927  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2928  {
    2929  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2930  }
    2931  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2932  {
    2933  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2934  }
    2935  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2936  {
    2937  snprintf(outStr, strLen, "%p", ptr);
    2938  }
    2939 #endif
    2940 
    2941 #ifndef VMA_MUTEX
    2942  class VmaMutex
    2943  {
    2944  public:
    2945  VmaMutex() { }
    2946  ~VmaMutex() { }
    2947  void Lock() { m_Mutex.lock(); }
    2948  void Unlock() { m_Mutex.unlock(); }
    2949  private:
    2950  std::mutex m_Mutex;
    2951  };
    2952  #define VMA_MUTEX VmaMutex
    2953 #endif
    2954 
    2955 /*
    2956 If providing your own implementation, you need to implement a subset of std::atomic:
    2957 
    2958 - Constructor(uint32_t desired)
    2959 - uint32_t load() const
    2960 - void store(uint32_t desired)
    2961 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2962 */
    2963 #ifndef VMA_ATOMIC_UINT32
    2964  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2965 #endif
    2966 
    2967 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2968 
    2972  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2973 #endif
    2974 
    2975 #ifndef VMA_DEBUG_ALIGNMENT
    2976 
    2980  #define VMA_DEBUG_ALIGNMENT (1)
    2981 #endif
    2982 
    2983 #ifndef VMA_DEBUG_MARGIN
    2984 
    2988  #define VMA_DEBUG_MARGIN (0)
    2989 #endif
    2990 
    2991 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2992 
    2996  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2997 #endif
    2998 
    2999 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3000 
    3005  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3006 #endif
    3007 
    3008 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3009 
    3013  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3014 #endif
    3015 
    3016 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3017 
    3021  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3022 #endif
    3023 
    3024 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3025  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3027 #endif
    3028 
    3029 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3030  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3032 #endif
    3033 
    3034 #ifndef VMA_CLASS_NO_COPY
    3035  #define VMA_CLASS_NO_COPY(className) \
    3036  private: \
    3037  className(const className&) = delete; \
    3038  className& operator=(const className&) = delete;
    3039 #endif
    3040 
    3041 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3042 
    3043 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3044 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3045 
    3046 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3047 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3048 
    3049 /*******************************************************************************
    3050 END OF CONFIGURATION
    3051 */
    3052 
    3053 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3054  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3055 
    3056 // Returns number of bits set to 1 in (v).
    3057 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3058 {
    3059  uint32_t c = v - ((v >> 1) & 0x55555555);
    3060  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3061  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3062  c = ((c >> 8) + c) & 0x00FF00FF;
    3063  c = ((c >> 16) + c) & 0x0000FFFF;
    3064  return c;
    3065 }
    3066 
    3067 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3068 // Use types like uint32_t, uint64_t as T.
    3069 template <typename T>
    3070 static inline T VmaAlignUp(T val, T align)
    3071 {
    3072  return (val + align - 1) / align * align;
    3073 }
    3074 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3075 // Use types like uint32_t, uint64_t as T.
    3076 template <typename T>
    3077 static inline T VmaAlignDown(T val, T align)
    3078 {
    3079  return val / align * align;
    3080 }
    3081 
    3082 // Division with mathematical rounding to nearest number.
    3083 template <typename T>
    3084 static inline T VmaRoundDiv(T x, T y)
    3085 {
    3086  return (x + (y / (T)2)) / y;
    3087 }
    3088 
    3089 /*
    3090 Returns true if given number is a power of two.
    3091 T must be unsigned integer number or signed integer but always nonnegative.
    3092 For 0 returns true.
    3093 */
    3094 template <typename T>
    3095 inline bool VmaIsPow2(T x)
    3096 {
    3097  return (x & (x-1)) == 0;
    3098 }
    3099 
    3100 // Returns smallest power of 2 greater or equal to v.
    3101 static inline uint32_t VmaNextPow2(uint32_t v)
    3102 {
    3103  v--;
    3104  v |= v >> 1;
    3105  v |= v >> 2;
    3106  v |= v >> 4;
    3107  v |= v >> 8;
    3108  v |= v >> 16;
    3109  v++;
    3110  return v;
    3111 }
    3112 static inline uint64_t VmaNextPow2(uint64_t v)
    3113 {
    3114  v--;
    3115  v |= v >> 1;
    3116  v |= v >> 2;
    3117  v |= v >> 4;
    3118  v |= v >> 8;
    3119  v |= v >> 16;
    3120  v |= v >> 32;
    3121  v++;
    3122  return v;
    3123 }
    3124 
    3125 // Returns largest power of 2 less or equal to v.
    3126 static inline uint32_t VmaPrevPow2(uint32_t v)
    3127 {
    3128  v |= v >> 1;
    3129  v |= v >> 2;
    3130  v |= v >> 4;
    3131  v |= v >> 8;
    3132  v |= v >> 16;
    3133  v = v ^ (v >> 1);
    3134  return v;
    3135 }
    3136 static inline uint64_t VmaPrevPow2(uint64_t v)
    3137 {
    3138  v |= v >> 1;
    3139  v |= v >> 2;
    3140  v |= v >> 4;
    3141  v |= v >> 8;
    3142  v |= v >> 16;
    3143  v |= v >> 32;
    3144  v = v ^ (v >> 1);
    3145  return v;
    3146 }
    3147 
    3148 static inline bool VmaStrIsEmpty(const char* pStr)
    3149 {
    3150  return pStr == VMA_NULL || *pStr == '\0';
    3151 }
    3152 
    3153 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3154 {
    3155  switch(algorithm)
    3156  {
    3158  return "Linear";
    3160  return "Buddy";
    3161  case 0:
    3162  return "Default";
    3163  default:
    3164  VMA_ASSERT(0);
    3165  return "";
    3166  }
    3167 }
    3168 
    3169 #ifndef VMA_SORT
    3170 
    3171 template<typename Iterator, typename Compare>
    3172 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3173 {
    3174  Iterator centerValue = end; --centerValue;
    3175  Iterator insertIndex = beg;
    3176  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3177  {
    3178  if(cmp(*memTypeIndex, *centerValue))
    3179  {
    3180  if(insertIndex != memTypeIndex)
    3181  {
    3182  VMA_SWAP(*memTypeIndex, *insertIndex);
    3183  }
    3184  ++insertIndex;
    3185  }
    3186  }
    3187  if(insertIndex != centerValue)
    3188  {
    3189  VMA_SWAP(*insertIndex, *centerValue);
    3190  }
    3191  return insertIndex;
    3192 }
    3193 
    3194 template<typename Iterator, typename Compare>
    3195 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3196 {
    3197  if(beg < end)
    3198  {
    3199  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3200  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3201  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3202  }
    3203 }
    3204 
    3205 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3206 
    3207 #endif // #ifndef VMA_SORT
    3208 
    3209 /*
    3210 Returns true if two memory blocks occupy overlapping pages.
    3211 ResourceA must be in less memory offset than ResourceB.
    3212 
    3213 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3214 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3215 */
    3216 static inline bool VmaBlocksOnSamePage(
    3217  VkDeviceSize resourceAOffset,
    3218  VkDeviceSize resourceASize,
    3219  VkDeviceSize resourceBOffset,
    3220  VkDeviceSize pageSize)
    3221 {
    3222  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3223  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3224  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3225  VkDeviceSize resourceBStart = resourceBOffset;
    3226  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3227  return resourceAEndPage == resourceBStartPage;
    3228 }
    3229 
    3230 enum VmaSuballocationType
    3231 {
    3232  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3233  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3234  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3235  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3236  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3237  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3238  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3239 };
    3240 
    3241 /*
    3242 Returns true if given suballocation types could conflict and must respect
    3243 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3244 or linear image and another one is optimal image. If type is unknown, behave
    3245 conservatively.
    3246 */
    3247 static inline bool VmaIsBufferImageGranularityConflict(
    3248  VmaSuballocationType suballocType1,
    3249  VmaSuballocationType suballocType2)
    3250 {
    3251  if(suballocType1 > suballocType2)
    3252  {
    3253  VMA_SWAP(suballocType1, suballocType2);
    3254  }
    3255 
    3256  switch(suballocType1)
    3257  {
    3258  case VMA_SUBALLOCATION_TYPE_FREE:
    3259  return false;
    3260  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3261  return true;
    3262  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3263  return
    3264  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3265  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3266  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3267  return
    3268  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3269  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3271  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3272  return
    3273  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3274  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3275  return false;
    3276  default:
    3277  VMA_ASSERT(0);
    3278  return true;
    3279  }
    3280 }
    3281 
    3282 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3283 {
    3284  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3285  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3286  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3287  {
    3288  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3289  }
    3290 }
    3291 
    3292 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3293 {
    3294  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3295  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3296  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3297  {
    3298  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3299  {
    3300  return false;
    3301  }
    3302  }
    3303  return true;
    3304 }
    3305 
    3306 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3307 struct VmaMutexLock
    3308 {
    3309  VMA_CLASS_NO_COPY(VmaMutexLock)
    3310 public:
    3311  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3312  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3313  {
    3314  if(m_pMutex)
    3315  {
    3316  m_pMutex->Lock();
    3317  }
    3318  }
    3319 
    3320  ~VmaMutexLock()
    3321  {
    3322  if(m_pMutex)
    3323  {
    3324  m_pMutex->Unlock();
    3325  }
    3326  }
    3327 
    3328 private:
    3329  VMA_MUTEX* m_pMutex;
    3330 };
    3331 
    3332 #if VMA_DEBUG_GLOBAL_MUTEX
    3333  static VMA_MUTEX gDebugGlobalMutex;
    3334  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3335 #else
    3336  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3337 #endif
    3338 
    3339 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3340 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3341 
    3342 /*
    3343 Performs binary search and returns iterator to first element that is greater or
    3344 equal to (key), according to comparison (cmp).
    3345 
    3346 Cmp should return true if first argument is less than second argument.
    3347 
    3348 Returned value is the found element, if present in the collection or place where
    3349 new element with value (key) should be inserted.
    3350 */
    3351 template <typename CmpLess, typename IterT, typename KeyT>
    3352 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3353 {
    3354  size_t down = 0, up = (end - beg);
    3355  while(down < up)
    3356  {
    3357  const size_t mid = (down + up) / 2;
    3358  if(cmp(*(beg+mid), key))
    3359  {
    3360  down = mid + 1;
    3361  }
    3362  else
    3363  {
    3364  up = mid;
    3365  }
    3366  }
    3367  return beg + down;
    3368 }
    3369 
    3371 // Memory allocation
    3372 
    3373 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3374 {
    3375  if((pAllocationCallbacks != VMA_NULL) &&
    3376  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3377  {
    3378  return (*pAllocationCallbacks->pfnAllocation)(
    3379  pAllocationCallbacks->pUserData,
    3380  size,
    3381  alignment,
    3382  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3383  }
    3384  else
    3385  {
    3386  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3387  }
    3388 }
    3389 
// Frees memory previously obtained from VmaMalloc. Mirrors VmaMalloc's
// dispatch: user callback (pfnFree) when installed, system free otherwise.
static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_FREE(ptr);
    }
}
    3402 
// Allocates raw, uninitialized storage for a single T with T's natural
// alignment. Pair with placement new (see vma_new) and vma_delete.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3408 
// Allocates raw, uninitialized storage for 'count' contiguous T objects.
// Elements are NOT constructed; pair with vma_new_array / vma_delete_array.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3414 
    3415 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3416 
    3417 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3418 
    3419 template<typename T>
    3420 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3421 {
    3422  ptr->~T();
    3423  VmaFree(pAllocationCallbacks, ptr);
    3424 }
    3425 
// Destroys 'count' objects created with vma_new_array and frees their storage.
// Null-safe. Elements are destroyed in reverse order, matching C++ array
// destruction semantics.
template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}
    3438 
// STL-compatible allocator. Adapts VkAllocationCallbacks to the minimal
// allocator interface required by STL containers (and VmaVector/VmaList).
template<typename T>
class VmaStlAllocator
{
public:
    // Exposed publicly so rebinding copies and container internals can reach it.
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebind copy-constructor required by the allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // m_pCallbacks is const, so assignment cannot be meaningfully supported.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3466 
    3467 #if VMA_USE_STL_VECTOR
    3468 
    3469 #define VmaVector std::vector
    3470 
    3471 template<typename T, typename allocatorT>
    3472 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3473 {
    3474  vec.insert(vec.begin() + index, item);
    3475 }
    3476 
    3477 template<typename T, typename allocatorT>
    3478 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3479 {
    3480  vec.erase(vec.begin() + index);
    3481 }
    3482 
    3483 #else // #if VMA_USE_STL_VECTOR
    3484 
    3485 /* Class with interface compatible with subset of std::vector.
    3486 T must be POD because constructors and destructors are not called and memcpy is
    3487 used for these objects. */
    3488 template<typename T, typename AllocatorT>
    3489 class VmaVector
    3490 {
    3491 public:
    3492  typedef T value_type;
    3493 
    3494  VmaVector(const AllocatorT& allocator) :
    3495  m_Allocator(allocator),
    3496  m_pArray(VMA_NULL),
    3497  m_Count(0),
    3498  m_Capacity(0)
    3499  {
    3500  }
    3501 
    3502  VmaVector(size_t count, const AllocatorT& allocator) :
    3503  m_Allocator(allocator),
    3504  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3505  m_Count(count),
    3506  m_Capacity(count)
    3507  {
    3508  }
    3509 
    3510  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3511  m_Allocator(src.m_Allocator),
    3512  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3513  m_Count(src.m_Count),
    3514  m_Capacity(src.m_Count)
    3515  {
    3516  if(m_Count != 0)
    3517  {
    3518  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3519  }
    3520  }
    3521 
    3522  ~VmaVector()
    3523  {
    3524  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3525  }
    3526 
    3527  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3528  {
    3529  if(&rhs != this)
    3530  {
    3531  resize(rhs.m_Count);
    3532  if(m_Count != 0)
    3533  {
    3534  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3535  }
    3536  }
    3537  return *this;
    3538  }
    3539 
    3540  bool empty() const { return m_Count == 0; }
    3541  size_t size() const { return m_Count; }
    3542  T* data() { return m_pArray; }
    3543  const T* data() const { return m_pArray; }
    3544 
    3545  T& operator[](size_t index)
    3546  {
    3547  VMA_HEAVY_ASSERT(index < m_Count);
    3548  return m_pArray[index];
    3549  }
    3550  const T& operator[](size_t index) const
    3551  {
    3552  VMA_HEAVY_ASSERT(index < m_Count);
    3553  return m_pArray[index];
    3554  }
    3555 
    3556  T& front()
    3557  {
    3558  VMA_HEAVY_ASSERT(m_Count > 0);
    3559  return m_pArray[0];
    3560  }
    3561  const T& front() const
    3562  {
    3563  VMA_HEAVY_ASSERT(m_Count > 0);
    3564  return m_pArray[0];
    3565  }
    3566  T& back()
    3567  {
    3568  VMA_HEAVY_ASSERT(m_Count > 0);
    3569  return m_pArray[m_Count - 1];
    3570  }
    3571  const T& back() const
    3572  {
    3573  VMA_HEAVY_ASSERT(m_Count > 0);
    3574  return m_pArray[m_Count - 1];
    3575  }
    3576 
    3577  void reserve(size_t newCapacity, bool freeMemory = false)
    3578  {
    3579  newCapacity = VMA_MAX(newCapacity, m_Count);
    3580 
    3581  if((newCapacity < m_Capacity) && !freeMemory)
    3582  {
    3583  newCapacity = m_Capacity;
    3584  }
    3585 
    3586  if(newCapacity != m_Capacity)
    3587  {
    3588  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3589  if(m_Count != 0)
    3590  {
    3591  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3592  }
    3593  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3594  m_Capacity = newCapacity;
    3595  m_pArray = newArray;
    3596  }
    3597  }
    3598 
    3599  void resize(size_t newCount, bool freeMemory = false)
    3600  {
    3601  size_t newCapacity = m_Capacity;
    3602  if(newCount > m_Capacity)
    3603  {
    3604  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3605  }
    3606  else if(freeMemory)
    3607  {
    3608  newCapacity = newCount;
    3609  }
    3610 
    3611  if(newCapacity != m_Capacity)
    3612  {
    3613  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3614  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3615  if(elementsToCopy != 0)
    3616  {
    3617  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3618  }
    3619  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3620  m_Capacity = newCapacity;
    3621  m_pArray = newArray;
    3622  }
    3623 
    3624  m_Count = newCount;
    3625  }
    3626 
    3627  void clear(bool freeMemory = false)
    3628  {
    3629  resize(0, freeMemory);
    3630  }
    3631 
    3632  void insert(size_t index, const T& src)
    3633  {
    3634  VMA_HEAVY_ASSERT(index <= m_Count);
    3635  const size_t oldCount = size();
    3636  resize(oldCount + 1);
    3637  if(index < oldCount)
    3638  {
    3639  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3640  }
    3641  m_pArray[index] = src;
    3642  }
    3643 
    3644  void remove(size_t index)
    3645  {
    3646  VMA_HEAVY_ASSERT(index < m_Count);
    3647  const size_t oldCount = size();
    3648  if(index < oldCount - 1)
    3649  {
    3650  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3651  }
    3652  resize(oldCount - 1);
    3653  }
    3654 
    3655  void push_back(const T& src)
    3656  {
    3657  const size_t newIndex = size();
    3658  resize(newIndex + 1);
    3659  m_pArray[newIndex] = src;
    3660  }
    3661 
    3662  void pop_back()
    3663  {
    3664  VMA_HEAVY_ASSERT(m_Count > 0);
    3665  resize(size() - 1);
    3666  }
    3667 
    3668  void push_front(const T& src)
    3669  {
    3670  insert(0, src);
    3671  }
    3672 
    3673  void pop_front()
    3674  {
    3675  VMA_HEAVY_ASSERT(m_Count > 0);
    3676  remove(0);
    3677  }
    3678 
    3679  typedef T* iterator;
    3680 
    3681  iterator begin() { return m_pArray; }
    3682  iterator end() { return m_pArray + m_Count; }
    3683 
    3684 private:
    3685  AllocatorT m_Allocator;
    3686  T* m_pArray;
    3687  size_t m_Count;
    3688  size_t m_Capacity;
    3689 };
    3690 
// Free-function adapter so generic code can insert into either std::vector or
// VmaVector with the same syntax (see the VMA_USE_STL_VECTOR branch above).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3696 
// Free-function adapter so generic code can erase from either std::vector or
// VmaVector with the same syntax.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3702 
    3703 #endif // #if VMA_USE_STL_VECTOR
    3704 
// Inserts 'value' into an already-sorted vector, keeping it sorted according
// to CmpLess. Returns the index at which the value was inserted.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    // Binary-search the lower bound over the raw contiguous storage.
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
    3716 
// Removes the first element equal to 'value' (equality = neither less-than
// holds) from a sorted vector. Returns true if an element was removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    // Lower bound found; verify it is actually equivalent to 'value'.
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
    3734 
// Binary-searches [beg, end) (must be sorted by CmpLess) for an element
// equivalent to 'value'. Returns an iterator to it, or 'end' if not found.
template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
{
    CmpLess comparator;
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, comparator);
    // Lower bound is a hit only if it compares equivalent to 'value'.
    if(it == end ||
        (!comparator(*it, value) && !comparator(value, *it)))
    {
        return it;
    }
    return end;
}
    3748 
    3750 // class VmaPoolAllocator
    3751 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Outstanding T pointers become invalid.
    void Clear();
    // Returns storage for one T. The object is NOT constructed.
    T* Alloc();
    // Returns 'ptr' (obtained from Alloc) to its block's free list.
    void Free(T* ptr);

private:
    // Each slot is either a live T or, while free, a link in the block's
    // singly-linked free list (index of the next free slot).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        // Index of first free slot in pItems, or UINT32_MAX if block is full.
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3787 
// Constructs an empty pool; blocks of 'itemsPerBlock' slots are created lazily
// by Alloc(). itemsPerBlock must be > 0.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3796 
// Releases all blocks. Any T still allocated from this pool is freed without
// its destructor being called (slots hold raw storage only).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3802 
// Frees every block's item array and empties the block list.
template<typename T>
void VmaPoolAllocator<T>::Clear()
{
    for(size_t i = m_ItemBlocks.size(); i--; )
        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    m_ItemBlocks.clear();
}
    3810 
// Returns storage for one T: pops the head of the first block's free list
// that has a free slot, or creates a new block when all are full.
// The returned object is NOT constructed.
template<typename T>
T* VmaPoolAllocator<T>::Alloc()
{
    // Iterate newest block first — most likely to have free slots.
    for(size_t i = m_ItemBlocks.size(); i--; )
    {
        ItemBlock& block = m_ItemBlocks[i];
        // This block has some free items: Use first one.
        if(block.FirstFreeIndex != UINT32_MAX)
        {
            Item* const pItem = &block.pItems[block.FirstFreeIndex];
            // Pop from the free list: head advances to the next free slot.
            block.FirstFreeIndex = pItem->NextFreeIndex;
            return &pItem->Value;
        }
    }

    // No block has free item: Create new one and use it.
    ItemBlock& newBlock = CreateNewBlock();
    Item* const pItem = &newBlock.pItems[0];
    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    return &pItem->Value;
}
    3832 
// Returns 'ptr' (previously obtained from Alloc) to the free list of the block
// that contains it. Asserts if the pointer was not allocated from this pool.
// O(number of blocks) — linear scan to locate the owning block.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union. memcpy avoids strict-aliasing/type-punning issues
        // when reinterpreting the T* as an Item*.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            // Push the slot onto the block's free list.
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
    3856 
// Allocates a new block of m_ItemsPerBlock slots, links all slots into the
// block's free list, and returns a reference to the stored block.
template<typename T>
typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
{
    ItemBlock newBlock = {
        vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };

    m_ItemBlocks.push_back(newBlock);

    // Setup singly-linked list of all free items in this block.
    // Note: writing through the local copy 'newBlock' is fine — its pItems
    // points at the same heap array as the copy stored in m_ItemBlocks.
    for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
        newBlock.pItems[i].NextFreeIndex = i + 1;
    newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    return m_ItemBlocks.back();
}
    3871 
    3873 // class VmaRawList, VmaList
    3874 
    3875 #if VMA_USE_STL_LIST
    3876 
    3877 #define VmaList std::list
    3878 
    3879 #else // #if VMA_USE_STL_LIST
    3880 
// Node of VmaRawList: doubly-linked, storing the payload by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    3888 
// Doubly linked list. Nodes come from an internal VmaPoolAllocator, so
// individual inserts/removals do not hit the general-purpose allocator.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // The no-argument Push* overloads return a node with uninitialized Value.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    // Pool from which all list nodes are allocated.
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    3933 
// Constructs an empty list; node pool uses blocks of 128 items.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3943 
// Node memory is reclaimed when m_ItemAllocator is destroyed, so there is
// nothing to do here.
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
    3950 
// Removes all items, returning each node to the pool allocator.
template<typename T>
void VmaRawList<T>::Clear()
{
    if(IsEmpty() == false)
    {
        // Walk back-to-front, freeing each node after saving its predecessor.
        ItemType* pItem = m_pBack;
        while(pItem != VMA_NULL)
        {
            ItemType* const pPrevItem = pItem->pPrev;
            m_ItemAllocator.Free(pItem);
            pItem = pPrevItem;
        }
        m_pFront = VMA_NULL;
        m_pBack = VMA_NULL;
        m_Count = 0;
    }
}
    3968 
// Appends a new node at the back and returns it. The node's Value is
// uninitialized; the caller is expected to fill it in.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pNext = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pPrev = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pPrev = m_pBack;
        m_pBack->pNext = pNewItem;
        m_pBack = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
    3990 
// Prepends a new node at the front and returns it. The node's Value is
// uninitialized; the caller is expected to fill it in.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront()
{
    ItemType* const pNewItem = m_ItemAllocator.Alloc();
    pNewItem->pPrev = VMA_NULL;
    if(IsEmpty())
    {
        pNewItem->pNext = VMA_NULL;
        m_pFront = pNewItem;
        m_pBack = pNewItem;
        m_Count = 1;
    }
    else
    {
        pNewItem->pNext = m_pFront;
        m_pFront->pPrev = pNewItem;
        m_pFront = pNewItem;
        ++m_Count;
    }
    return pNewItem;
}
    4012 
// Appends a new node holding a copy of 'value' and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    4020 
// Prepends a new node holding a copy of 'value' and returns it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    4028 
    4029 template<typename T>
    4030 void VmaRawList<T>::PopBack()
    4031 {
    4032  VMA_HEAVY_ASSERT(m_Count > 0);
    4033  ItemType* const pBackItem = m_pBack;
    4034  ItemType* const pPrevItem = pBackItem->pPrev;
    4035  if(pPrevItem != VMA_NULL)
    4036  {
    4037  pPrevItem->pNext = VMA_NULL;
    4038  }
    4039  m_pBack = pPrevItem;
    4040  m_ItemAllocator.Free(pBackItem);
    4041  --m_Count;
    4042 }
    4043 
    4044 template<typename T>
    4045 void VmaRawList<T>::PopFront()
    4046 {
    4047  VMA_HEAVY_ASSERT(m_Count > 0);
    4048  ItemType* const pFrontItem = m_pFront;
    4049  ItemType* const pNextItem = pFrontItem->pNext;
    4050  if(pNextItem != VMA_NULL)
    4051  {
    4052  pNextItem->pPrev = VMA_NULL;
    4053  }
    4054  m_pFront = pNextItem;
    4055  m_ItemAllocator.Free(pFrontItem);
    4056  --m_Count;
    4057 }
    4058 
// Unlinks and frees 'pItem', which must belong to this list. Correctly
// updates m_pFront/m_pBack when the removed item was at either end.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Splice out on the predecessor side (or move the front pointer).
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Splice out on the successor side (or move the back pointer).
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4088 
// Inserts a new node (Value uninitialized) before 'pItem' and returns it.
// A null 'pItem' means insert at the end (PushBack).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // Inserting before the first item: new node becomes the front.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
    4114 
// Inserts a new node (Value uninitialized) after 'pItem' and returns it.
// A null 'pItem' means insert at the beginning (PushFront).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // Inserting after the last item: new node becomes the back.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4140 
// Inserts a new node holding a copy of 'value' before 'pItem' (null = back).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    4148 
// Inserts a new node holding a copy of 'value' after 'pItem' (null = front).
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4156 
// std::list-like wrapper over VmaRawList, providing iterator and
// const_iterator types. Both iterators represent end() as a null m_pItem,
// which is why operator-- must special-case null (steps back from end to the
// last element).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Implicit conversion from iterator, like std::list.
        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4341 
    4342 #endif // #if VMA_USE_STL_LIST
    4343 
    4345 // class VmaMap
    4346 
    4347 // Unused in this version.
    4348 #if 0
    4349 
    4350 #if VMA_USE_STL_UNORDERED_MAP
    4351 
    4352 #define VmaPair std::pair
    4353 
    4354 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4355  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4356 
    4357 #else // #if VMA_USE_STL_UNORDERED_MAP
    4358 
// Minimal std::pair substitute (POD-friendly) for VmaMap. Note: this whole
// VmaMap section is inside '#if 0' — unused in this version.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4368 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a sorted vector of pairs: O(log n) find, O(n) insert/erase. */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    // Returns iterator to the pair with the given key, or end() if absent.
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Kept sorted by key so find() can binary-search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4391 
    4392 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4393 
    4394 template<typename FirstT, typename SecondT>
    4395 struct VmaPairFirstLess
    4396 {
    4397  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4398  {
    4399  return lhs.first < rhs.first;
    4400  }
    4401  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4402  {
    4403  return lhs.first < rhsFirst;
    4404  }
    4405 };
    4406 
    4407 template<typename KeyT, typename ValueT>
    4408 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4409 {
    4410  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4411  m_Vector.data(),
    4412  m_Vector.data() + m_Vector.size(),
    4413  pair,
    4414  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4415  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4416 }
    4417 
    4418 template<typename KeyT, typename ValueT>
    4419 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4420 {
    4421  PairType* it = VmaBinaryFindFirstNotLess(
    4422  m_Vector.data(),
    4423  m_Vector.data() + m_Vector.size(),
    4424  key,
    4425  VmaPairFirstLess<KeyT, ValueT>());
    4426  if((it != m_Vector.end()) && (it->first == key))
    4427  {
    4428  return it;
    4429  }
    4430  else
    4431  {
    4432  return m_Vector.end();
    4433  }
    4434 }
    4435 
    4436 template<typename KeyT, typename ValueT>
    4437 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4438 {
    4439  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4440 }
    4441 
    4442 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4443 
    4444 #endif // #if 0
    4445 
    4447 
class VmaDeviceMemoryBlock;

// Selects the direction of mapped-memory cache maintenance.
// NOTE(review): presumed to map to vkFlushMappedMemoryRanges /
// vkInvalidateMappedMemoryRanges respectively - confirm against the
// consumer of this enum (not visible in this chunk).
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4451 
/*
Internal representation of a single allocation (public handle type VmaAllocation).

An allocation is one of:
- ALLOCATION_TYPE_BLOCK: a sub-range of a larger VmaDeviceMemoryBlock, or
- ALLOCATION_TYPE_DEDICATED: owner of its own private VkDeviceMemory.
The two variants share storage through the anonymous union near the bottom.
An object starts as ALLOCATION_TYPE_NONE and is initialized exactly once by
InitBlockAllocation(), InitLost(), or InitDedicatedAllocation().
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // High bit of m_MapCount: allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // m_pUserData points to a string owned by this allocation
        // (freed via FreeUserDataString) rather than an opaque pointer.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Creates an uninitialized allocation (ALLOCATION_TYPE_NONE).
    // userDataString selects string-owning semantics for m_pUserData.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Persistent-map bit is allowed to remain; any extra map refcount is a bug.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Initializes this allocation as a sub-range of an existing block.
    // Must be called on a fresh (ALLOCATION_TYPE_NONE) object.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes this allocation as born lost: block-type, but with null
    // block and m_LastUseFrameIndex already set to VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Moves a block allocation to another block/offset (used by defragmentation).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block allocations.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; weak form, so callers should loop.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo as if this dedicated allocation were a fully-used
    // single-allocation block. Valid only for dedicated allocations.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges: min stays at the "empty" sentinel, max at 0.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap helpers, dispatched by allocation type.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4670 
    4671 /*
    4672 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4673 allocated memory block or free.
    4674 */
    4675 struct VmaSuballocation
    4676 {
    4677  VkDeviceSize offset;
    4678  VkDeviceSize size;
    4679  VmaAllocation hAllocation;
    4680  VmaSuballocationType type;
    4681 };
    4682 
    4683 // Comparator for offsets.
    4684 struct VmaSuballocationOffsetLess
    4685 {
    4686  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4687  {
    4688  return lhs.offset < rhs.offset;
    4689  }
    4690 };
    4691 struct VmaSuballocationOffsetGreater
    4692 {
    4693  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4694  {
    4695  return lhs.offset > rhs.offset;
    4696  }
    4697 };
    4698 
// List of suballocations covering a whole block; the bookkeeping structure
// of VmaBlockMetadata_Generic (see m_Suballocations below).
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
// Used in VmaAllocationRequest::CalcCost() to penalize requests that
// sacrifice more live allocations.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4703 
    4704 /*
    4705 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4706 
    4707 If canMakeOtherLost was false:
    4708 - item points to a FREE suballocation.
    4709 - itemsToMakeLostCount is 0.
    4710 
    4711 If canMakeOtherLost was true:
    4712 - item points to first of sequence of suballocations, which are either FREE,
    4713  or point to VmaAllocations that can become lost.
    4714 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4715  the requested allocation to succeed.
    4716 */
    4717 struct VmaAllocationRequest
    4718 {
    4719  VkDeviceSize offset;
    4720  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    4721  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    4722  VmaSuballocationList::iterator item;
    4723  size_t itemsToMakeLostCount;
    4724  void* customData;
    4725 
    4726  VkDeviceSize CalcCost() const
    4727  {
    4728  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    4729  }
    4730 };
    4731 
    4732 /*
    4733 Data structure used for bookkeeping of allocations and unused ranges of memory
    4734 in a single VkDeviceMemory block.
    4735 */
    4736 class VmaBlockMetadata
    4737 {
    4738 public:
    4739  VmaBlockMetadata(VmaAllocator hAllocator);
    4740  virtual ~VmaBlockMetadata() { }
    4741  virtual void Init(VkDeviceSize size) { m_Size = size; }
    4742 
    4743  // Validates all data structures inside this object. If not valid, returns false.
    4744  virtual bool Validate() const = 0;
    4745  VkDeviceSize GetSize() const { return m_Size; }
    4746  virtual size_t GetAllocationCount() const = 0;
    4747  virtual VkDeviceSize GetSumFreeSize() const = 0;
    4748  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    4749  // Returns true if this block is empty - contains only single free suballocation.
    4750  virtual bool IsEmpty() const = 0;
    4751 
    4752  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    4753  // Shouldn't modify blockCount.
    4754  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    4755 
    4756 #if VMA_STATS_STRING_ENABLED
    4757  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    4758 #endif
    4759 
    4760  // Tries to find a place for suballocation with given parameters inside this block.
    4761  // If succeeded, fills pAllocationRequest and returns true.
    4762  // If failed, returns false.
    4763  virtual bool CreateAllocationRequest(
    4764  uint32_t currentFrameIndex,
    4765  uint32_t frameInUseCount,
    4766  VkDeviceSize bufferImageGranularity,
    4767  VkDeviceSize allocSize,
    4768  VkDeviceSize allocAlignment,
    4769  bool upperAddress,
    4770  VmaSuballocationType allocType,
    4771  bool canMakeOtherLost,
    4772  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
    4773  VmaAllocationRequest* pAllocationRequest) = 0;
    4774 
    4775  virtual bool MakeRequestedAllocationsLost(
    4776  uint32_t currentFrameIndex,
    4777  uint32_t frameInUseCount,
    4778  VmaAllocationRequest* pAllocationRequest) = 0;
    4779 
    4780  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    4781 
    4782  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    4783 
    4784  // Makes actual allocation based on request. Request must already be checked and valid.
    4785  virtual void Alloc(
    4786  const VmaAllocationRequest& request,
    4787  VmaSuballocationType type,
    4788  VkDeviceSize allocSize,
    4789  bool upperAddress,
    4790  VmaAllocation hAllocation) = 0;
    4791 
    4792  // Frees suballocation assigned to given memory region.
    4793  virtual void Free(const VmaAllocation allocation) = 0;
    4794  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    4795 
    4796  // Tries to resize (grow or shrink) space for given allocation, in place.
    4797  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
    4798 
    4799 protected:
    4800  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    4801 
    4802 #if VMA_STATS_STRING_ENABLED
    4803  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    4804  VkDeviceSize unusedBytes,
    4805  size_t allocationCount,
    4806  size_t unusedRangeCount) const;
    4807  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    4808  VkDeviceSize offset,
    4809  VmaAllocation hAllocation) const;
    4810  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    4811  VkDeviceSize offset,
    4812  VkDeviceSize size) const;
    4813  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    4814 #endif
    4815 
    4816 private:
    4817  VkDeviceSize m_Size;
    4818  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4819 };
    4820 
// Used inside Validate() implementations: asserts cond in debug builds and
// makes the *enclosing* function return false when it fails. Must remain a
// macro (not a function) because of that early return.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4825 
/*
Default metadata strategy: a free-list over a linked list of suballocations
covering the whole block, with free ranges additionally indexed by size
(m_FreeSuballocationsBySize) for best-fit style searches.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Every list item is either an allocation or a free range, so:
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // The only strategy that supports in-place resize (overrides base default).
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    uint32_t m_FreeCount;           // Number of FREE items in m_Suballocations.
    VkDeviceSize m_SumFreeSize;     // Total bytes in FREE items.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4918 
    4919 /*
    4920 Allocations and their references in internal data structure look like this:
    4921 
    4922 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4923 
    4924  0 +-------+
    4925  | |
    4926  | |
    4927  | |
    4928  +-------+
    4929  | Alloc | 1st[m_1stNullItemsBeginCount]
    4930  +-------+
    4931  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4932  +-------+
    4933  | ... |
    4934  +-------+
    4935  | Alloc | 1st[1st.size() - 1]
    4936  +-------+
    4937  | |
    4938  | |
    4939  | |
    4940 GetSize() +-------+
    4941 
    4942 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4943 
    4944  0 +-------+
    4945  | Alloc | 2nd[0]
    4946  +-------+
    4947  | Alloc | 2nd[1]
    4948  +-------+
    4949  | ... |
    4950  +-------+
    4951  | Alloc | 2nd[2nd.size() - 1]
    4952  +-------+
    4953  | |
    4954  | |
    4955  | |
    4956  +-------+
    4957  | Alloc | 1st[m_1stNullItemsBeginCount]
    4958  +-------+
    4959  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4960  +-------+
    4961  | ... |
    4962  +-------+
    4963  | Alloc | 1st[1st.size() - 1]
    4964  +-------+
    4965  | |
    4966 GetSize() +-------+
    4967 
    4968 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4969 
    4970  0 +-------+
    4971  | |
    4972  | |
    4973  | |
    4974  +-------+
    4975  | Alloc | 1st[m_1stNullItemsBeginCount]
    4976  +-------+
    4977  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4978  +-------+
    4979  | ... |
    4980  +-------+
    4981  | Alloc | 1st[1st.size() - 1]
    4982  +-------+
    4983  | |
    4984  | |
    4985  | |
    4986  +-------+
    4987  | Alloc | 2nd[2nd.size() - 1]
    4988  +-------+
    4989  | ... |
    4990  +-------+
    4991  | Alloc | 2nd[1]
    4992  +-------+
    4993  | Alloc | 2nd[0]
    4994 GetSize() +-------+
    4995 
    4996 */
/*
Linear (ring-buffer / double-stack) metadata strategy.
See the large diagram comment above for the meaning of the 1st/2nd vectors
in each SECOND_VECTOR_MODE.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex; // 0 or 1: which of the two vectors is currently "1st".
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5095 
    5096 /*
    5097 - GetSize() is the original size of allocated memory block.
    5098 - m_UsableSize is this size aligned down to a power of two.
    5099  All allocations and calculations happen relative to m_UsableSize.
    5100 - GetUnusableSize() is the difference between them.
 It is reported as separate, unused range, not available for allocations.
    5102 
    5103 Node at level 0 has size = m_UsableSize.
    5104 Each next level contains nodes with size 2 times smaller than current level.
    5105 m_LevelCount is the maximum number of levels to use in the current object.
    5106 */
/*
Buddy-allocator metadata strategy: a binary tree of power-of-two nodes over
m_UsableSize, with per-level free lists. See the comment above for the
relationship between GetSize(), m_UsableSize and GetUnusableSize().
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Unusable tail counts as free for the base-class accounting.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty iff the root node is a single free node (never split).
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not implemented for this strategy.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    // Both public Free variants forward to the private FreeAtOffset(alloc, offset).
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators used while recursively validating the tree; compared
    // against m_AllocationCount / m_FreeCount / m_SumFreeSize afterwards.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy; // Sibling node covering the other half of the parent.

        // Active member selected by type.
        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free; // Links within m_FreeList at this node's level.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                Node* leftChild; // Right child is leftChild->buddy.
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level doubly-linked lists of TYPE_FREE nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves at each deeper level, starting from m_UsableSize at level 0.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5243 
    5244 /*
    5245 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5246 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5247 
    5248 Thread-safety: This class must be externally synchronized.
    5249 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Suballocation bookkeeping for this block. Created in Init(); the concrete
    // metadata type presumably depends on the `algorithm` parameter — see Init().
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the underlying VkDeviceMemory. Mapping appears to be reference-counted
    // via m_MapCount (see destructor assert) — `count` references are added.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5312 
    5313 struct VmaPointerLess
    5314 {
    5315  bool operator()(const void* lhs, const void* rhs) const
    5316  {
    5317  return lhs < rhs;
    5318  }
    5319 };
    5320 
    5321 class VmaDefragmentator;
    5322 
    5323 /*
    5324 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5325 Vulkan memory type.
    5326 
    5327 Synchronized internally with a mutex.
    5328 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // To be called after construction; presumably pre-creates m_MinBlockCount
    // empty blocks — confirm against the definition.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Main allocation entry point for this vector.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Returns the (lazily created) defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    // Guards the mutable state below (this struct is documented as
    // internally synchronized).
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5441 
// Implementation of the public VmaPool handle: a custom pool is essentially
// a dedicated block vector plus an id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // The id may be assigned only once after creation (enforced by the assert).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5464 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation. Usage: AddAllocation() for each candidate, then Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals reported via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, with an optional
    // per-allocation "was it moved" output flag.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default; recomputed by CalcHasNonMovableAllocations().
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when not every allocation in it
        // was registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): method name contains a typo ("Descecnding"); renaming
        // would touch call sites outside this declaration, so it is kept.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo pointers by the address of the underlying block;
    // heterogeneous overload allows lookup by raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as movable. pChanged, if not null, presumably
    // receives whether the allocation was actually moved.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5594 
    5595 #if VMA_RECORDING_ENABLED
    5596 
// Records every VMA API call, with a timestamp and thread id, into a text file
// configured via VmaRecordSettings. Compiled only when VMA_RECORDING_ENABLED.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device/memory configuration, so the
    // recording can be interpreted independently of the machine it ran on.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per public VMA entry point being traced.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Common per-call metadata written with every record.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Helper that renders pUserData for the log: either as the user's string
    // or, presumably, as a pointer formatted into m_PtrStr (16 hex digits +
    // terminator fits the 17-byte buffer) — confirm in the definition.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    // Serializes writes to m_File when m_UseMutex is set.
    VMA_MUTEX m_FileMutex;
    // Performance-counter frequency and start value used to compute
    // CallParams::time relative to recorder creation.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5696 
    5697 #endif // #if VMA_RECORDING_ENABLED
    5698 
    5699 // Main allocator object.
// Implementation of the public VmaAllocator handle. Owns the default block
// vectors (one per memory type), dedicated-allocation lists, custom pools,
// and the imported Vulkan function pointers.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    // One mutex per memory type guards the corresponding dedicated list.
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns null when the user did not specify custom CPU allocation callbacks.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device granularity, clamped up by the debug override macro.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Frees the given allocation from the dedicated-allocations list.
    // (The old "Returns true if found and freed" comment was stale: the
    // function returns void and takes `allocation`, not `pMemory`.)
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5900 
    5902 // Memory allocation #2 after VmaAllocator_T definition
    5903 
    5904 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5905 {
    5906  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5907 }
    5908 
    5909 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5910 {
    5911  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5912 }
    5913 
    5914 template<typename T>
    5915 static T* VmaAllocate(VmaAllocator hAllocator)
    5916 {
    5917  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5918 }
    5919 
    5920 template<typename T>
    5921 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5922 {
    5923  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5924 }
    5925 
    5926 template<typename T>
    5927 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5928 {
    5929  if(ptr != VMA_NULL)
    5930  {
    5931  ptr->~T();
    5932  VmaFree(hAllocator, ptr);
    5933  }
    5934 }
    5935 
    5936 template<typename T>
    5937 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5938 {
    5939  if(ptr != VMA_NULL)
    5940  {
    5941  for(size_t i = count; i--; )
    5942  ptr[i].~T();
    5943  VmaFree(hAllocator, ptr);
    5944  }
    5945 }
    5946 
    5948 // VmaStringBuilder
    5949 
    5950 #if VMA_STATS_STRING_ENABLED
    5951 
// Growable character buffer used to assemble the statistics string.
// NOTE(review): nothing here appends a terminating '\0', so the buffer appears
// not to be null-terminated — pair GetData() with GetLength(); confirm how the
// final consumer terminates it.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5969 
    5970 void VmaStringBuilder::Add(const char* pStr)
    5971 {
    5972  const size_t strLen = strlen(pStr);
    5973  if(strLen > 0)
    5974  {
    5975  const size_t oldCount = m_Data.size();
    5976  m_Data.resize(oldCount + strLen);
    5977  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5978  }
    5979 }
    5980 
    5981 void VmaStringBuilder::AddNumber(uint32_t num)
    5982 {
    5983  char buf[11];
    5984  VmaUint32ToStr(buf, sizeof(buf), num);
    5985  Add(buf);
    5986 }
    5987 
    5988 void VmaStringBuilder::AddNumber(uint64_t num)
    5989 {
    5990  char buf[21];
    5991  VmaUint64ToStr(buf, sizeof(buf), num);
    5992  Add(buf);
    5993 }
    5994 
    5995 void VmaStringBuilder::AddPointer(const void* ptr)
    5996 {
    5997  char buf[21];
    5998  VmaPtrToStr(buf, sizeof(buf), ptr);
    5999  Add(buf);
    6000 }
    6001 
    6002 #endif // #if VMA_STATS_STRING_ENABLED
    6003 
    6005 // VmaJsonWriter
    6006 
    6007 #if VMA_STATS_STRING_ENABLED
    6008 
// Streaming JSON writer that appends to an externally owned VmaStringBuilder.
// Usage is strictly nested: Begin*/End* calls must balance (asserted in the
// destructor), and strings may be emitted piecewise via Begin/Continue/EndString.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine suppresses per-element line breaks/indentation for the
    // opened collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted string in one call (BeginString + EndString).
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values written so far in this collection (for comma placement).
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of open collections, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6057 
    6058 const char* const VmaJsonWriter::INDENT = " ";
    6059 
// The writer does not own the string builder; it only appends to it.
// pAllocationCallbacks is used for the internal collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6066 
VmaJsonWriter::~VmaJsonWriter()
{
    // A well-formed document must have every string and every collection
    // closed by the time the writer is destroyed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6072 
    6073 void VmaJsonWriter::BeginObject(bool singleLine)
    6074 {
    6075  VMA_ASSERT(!m_InsideString);
    6076 
    6077  BeginValue(false);
    6078  m_SB.Add('{');
    6079 
    6080  StackItem item;
    6081  item.type = COLLECTION_TYPE_OBJECT;
    6082  item.valueCount = 0;
    6083  item.singleLineMode = singleLine;
    6084  m_Stack.push_back(item);
    6085 }
    6086 
// Closes the innermost object opened with BeginObject().
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess = true: the closing brace sits one indent level shallower than the members.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
    6097 
    6098 void VmaJsonWriter::BeginArray(bool singleLine)
    6099 {
    6100  VMA_ASSERT(!m_InsideString);
    6101 
    6102  BeginValue(false);
    6103  m_SB.Add('[');
    6104 
    6105  StackItem item;
    6106  item.type = COLLECTION_TYPE_ARRAY;
    6107  item.valueCount = 0;
    6108  item.singleLineMode = singleLine;
    6109  m_Stack.push_back(item);
    6110 }
    6111 
// Closes the innermost array opened with BeginArray().
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess = true: the closing bracket sits one indent level shallower than the elements.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
    6122 
// Writes a complete quoted string value (or object key) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6128 
    6129 void VmaJsonWriter::BeginString(const char* pStr)
    6130 {
    6131  VMA_ASSERT(!m_InsideString);
    6132 
    6133  BeginValue(true);
    6134  m_SB.Add('"');
    6135  m_InsideString = true;
    6136  if(pStr != VMA_NULL && pStr[0] != '\0')
    6137  {
    6138  ContinueString(pStr);
    6139  }
    6140 }
    6141 
    6142 void VmaJsonWriter::ContinueString(const char* pStr)
    6143 {
    6144  VMA_ASSERT(m_InsideString);
    6145 
    6146  const size_t strLen = strlen(pStr);
    6147  for(size_t i = 0; i < strLen; ++i)
    6148  {
    6149  char ch = pStr[i];
    6150  if(ch == '\\')
    6151  {
    6152  m_SB.Add("\\\\");
    6153  }
    6154  else if(ch == '"')
    6155  {
    6156  m_SB.Add("\\\"");
    6157  }
    6158  else if(ch >= 32)
    6159  {
    6160  m_SB.Add(ch);
    6161  }
    6162  else switch(ch)
    6163  {
    6164  case '\b':
    6165  m_SB.Add("\\b");
    6166  break;
    6167  case '\f':
    6168  m_SB.Add("\\f");
    6169  break;
    6170  case '\n':
    6171  m_SB.Add("\\n");
    6172  break;
    6173  case '\r':
    6174  m_SB.Add("\\r");
    6175  break;
    6176  case '\t':
    6177  m_SB.Add("\\t");
    6178  break;
    6179  default:
    6180  VMA_ASSERT(0 && "Character not currently supported.");
    6181  break;
    6182  }
    6183  }
    6184 }
    6185 
// Appends a decimal number to the string currently being built.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6191 
// Appends a decimal number to the string currently being built.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6197 
// Appends a pointer value (formatted by VmaStringBuilder::AddPointer)
// to the string currently being built.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6203 
// Writes an optional final fragment pStr, then closes the quoted string.
void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}
    6214 
// Writes a complete numeric value (with separators/indentation handled by BeginValue).
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6221 
// Writes a complete numeric value (with separators/indentation handled by BeginValue).
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6228 
    6229 void VmaJsonWriter::WriteBool(bool b)
    6230 {
    6231  VMA_ASSERT(!m_InsideString);
    6232  BeginValue(false);
    6233  m_SB.Add(b ? "true" : "false");
    6234 }
    6235 
// Writes the JSON literal "null" as a complete value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6242 
    6243 void VmaJsonWriter::BeginValue(bool isString)
    6244 {
    6245  if(!m_Stack.empty())
    6246  {
    6247  StackItem& currItem = m_Stack.back();
    6248  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6249  currItem.valueCount % 2 == 0)
    6250  {
    6251  VMA_ASSERT(isString);
    6252  }
    6253 
    6254  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6255  currItem.valueCount % 2 != 0)
    6256  {
    6257  m_SB.Add(": ");
    6258  }
    6259  else if(currItem.valueCount > 0)
    6260  {
    6261  m_SB.Add(", ");
    6262  WriteIndent();
    6263  }
    6264  else
    6265  {
    6266  WriteIndent();
    6267  }
    6268  ++currItem.valueCount;
    6269  }
    6270 }
    6271 
    6272 void VmaJsonWriter::WriteIndent(bool oneLess)
    6273 {
    6274  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6275  {
    6276  m_SB.AddNewLine();
    6277 
    6278  size_t count = m_Stack.size();
    6279  if(count > 0 && oneLess)
    6280  {
    6281  --count;
    6282  }
    6283  for(size_t i = 0; i < count; ++i)
    6284  {
    6285  m_SB.Add(INDENT);
    6286  }
    6287  }
    6288 }
    6289 
    6290 #endif // #if VMA_STATS_STRING_ENABLED
    6291 
    6293 
    6294 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6295 {
    6296  if(IsUserDataString())
    6297  {
    6298  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6299 
    6300  FreeUserDataString(hAllocator);
    6301 
    6302  if(pUserData != VMA_NULL)
    6303  {
    6304  const char* const newStrSrc = (char*)pUserData;
    6305  const size_t newStrLen = strlen(newStrSrc);
    6306  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6307  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6308  m_pUserData = newStrDst;
    6309  }
    6310  }
    6311  else
    6312  {
    6313  m_pUserData = pUserData;
    6314  }
    6315 }
    6316 
// Rebinds this block allocation to another block/offset, transferring its
// outstanding map reference count from the old block to the new one.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount; // Persistent mapping counts as one extra reference on the block.
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6338 
// Overwrites the recorded size of this allocation; must be positive.
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    6344 
    6345 VkDeviceSize VmaAllocation_T::GetOffset() const
    6346 {
    6347  switch(m_Type)
    6348  {
    6349  case ALLOCATION_TYPE_BLOCK:
    6350  return m_BlockAllocation.m_Offset;
    6351  case ALLOCATION_TYPE_DEDICATED:
    6352  return 0;
    6353  default:
    6354  VMA_ASSERT(0);
    6355  return 0;
    6356  }
    6357 }
    6358 
// Returns the VkDeviceMemory handle this allocation lives in.
VkDeviceMemory VmaAllocation_T::GetMemory() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->GetDeviceMemory();
    case ALLOCATION_TYPE_DEDICATED:
        return m_DedicatedAllocation.m_hMemory;
    default:
        VMA_ASSERT(0);
        return VK_NULL_HANDLE;
    }
}
    6372 
// Returns the Vulkan memory type index of this allocation's memory,
// or UINT32_MAX on an invalid allocation type (asserts first).
uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    case ALLOCATION_TYPE_DEDICATED:
        return m_DedicatedAllocation.m_MemoryTypeIndex;
    default:
        VMA_ASSERT(0);
        return UINT32_MAX;
    }
}
    6386 
    6387 void* VmaAllocation_T::GetMappedData() const
    6388 {
    6389  switch(m_Type)
    6390  {
    6391  case ALLOCATION_TYPE_BLOCK:
    6392  if(m_MapCount != 0)
    6393  {
    6394  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6395  VMA_ASSERT(pBlockData != VMA_NULL);
    6396  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6397  }
    6398  else
    6399  {
    6400  return VMA_NULL;
    6401  }
    6402  break;
    6403  case ALLOCATION_TYPE_DEDICATED:
    6404  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6405  return m_DedicatedAllocation.m_pMappedData;
    6406  default:
    6407  VMA_ASSERT(0);
    6408  return VMA_NULL;
    6409  }
    6410 }
    6411 
    6412 bool VmaAllocation_T::CanBecomeLost() const
    6413 {
    6414  switch(m_Type)
    6415  {
    6416  case ALLOCATION_TYPE_BLOCK:
    6417  return m_BlockAllocation.m_CanBecomeLost;
    6418  case ALLOCATION_TYPE_DEDICATED:
    6419  return false;
    6420  default:
    6421  VMA_ASSERT(0);
    6422  return false;
    6423  }
    6424 }
    6425 
// Returns the pool this allocation belongs to. Valid only for block
// allocations (asserts on other types).
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6431 
// Atomically marks this allocation as lost if its last use is older than
// currentFrameIndex - frameInUseCount. Returns true on success; false when
// the allocation is still within its in-use window (or already lost, which
// additionally asserts). Retries on CAS failure.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - the caller should not be asking again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by a recent frame: cannot be lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            // NOTE(review): on CAS failure, localLastUseFrameIndex is presumably
            // refreshed by CompareExchangeLastUseFrameIndex - defined elsewhere; verify.
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    6463 
    6464 #if VMA_STATS_STRING_ENABLED
    6465 
// Correspond to values of enum VmaSuballocationType.
// Used by the JSON statistics dump (PrintParameters / PrintDetailedMap_UnusedRange).
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6475 
// Writes this allocation's parameters as key/value pairs into an already-open
// JSON object. Optional fields (UserData, Usage) are emitted only when set.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque user data is dumped as a pointer string.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6511 
    6512 #endif
    6513 
    6514 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6515 {
    6516  VMA_ASSERT(IsUserDataString());
    6517  if(m_pUserData != VMA_NULL)
    6518  {
    6519  char* const oldStr = (char*)m_pUserData;
    6520  const size_t oldStrLen = strlen(oldStr);
    6521  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6522  m_pUserData = VMA_NULL;
    6523  }
    6524 }
    6525 
    6526 void VmaAllocation_T::BlockAllocMap()
    6527 {
    6528  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6529 
    6530  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6531  {
    6532  ++m_MapCount;
    6533  }
    6534  else
    6535  {
    6536  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6537  }
    6538 }
    6539 
// Decrements this block allocation's map reference count
// (persistent-map flag masked out); asserts on underflow.
void VmaAllocation_T::BlockAllocUnmap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    }
}
    6553 
// Maps dedicated memory with reference counting: the first call performs the
// actual vkMapMemory of the whole range; subsequent calls only bump the
// counter and return the cached pointer. Returns VK_ERROR_MEMORY_MAP_FAILED
// if the counter would overflow 0x7F.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            // Already mapped: reuse the cached pointer.
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    6590 
// Decrements the map reference count of dedicated memory; the last unmap
// performs the actual vkUnmapMemory. Asserts on underflow.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6611 
    6612 #if VMA_STATS_STRING_ENABLED
    6613 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one allocation / unused range,
// since they carry no extra information otherwise.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true); // Single-line sub-object.
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true); // Single-line sub-object.
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6661 
    6662 #endif // #if VMA_STATS_STRING_ENABLED
    6663 
// Comparator ordering suballocation-list iterators by ascending suballocation
// size. The second overload compares against a plain size, enabling
// binary search (e.g. VmaBinaryFindFirstNotLess) over the size-sorted array.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6679 
    6680 
    6682 // class VmaBlockMetadata
    6683 
// Base metadata: stores the block size (set later via Init) and the
// allocator's allocation callbacks for derived containers.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6689 
    6690 #if VMA_STATS_STRING_ENABLED
    6691 
// Opens the JSON object for one block's detailed map: summary fields followed
// by the start of the "Suballocations" array. Must be paired with
// PrintDetailedMap_End after the per-suballocation entries.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6714 
// Writes one used suballocation as a single-line JSON object:
// its offset plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6728 
// Writes one free range as a single-line JSON object with type "FREE".
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6746 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6752 
    6753 #endif // #if VMA_STATS_STRING_ENABLED
    6754 
    6756 // class VmaBlockMetadata_Generic
    6757 
// Generic (list-based) metadata: a linked list of suballocations plus an
// auxiliary array of free-suballocation iterators sorted by size.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6766 
// Containers clean up themselves; nothing else to release here.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6770 
// Initializes metadata for a freshly created block: a single free
// suballocation spanning the whole block, registered in the by-size array.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // The whole-block free range must be large enough to be registered by size.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem; // Iterator to the element just pushed.
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6790 
// Full consistency check of this block's metadata: suballocation offsets,
// free-range merging, the by-size array's contents and ordering, and the
// cached totals. Returns true when everything is consistent.
// NOTE(review): VMA_VALIDATE presumably reports/returns false on a failed
// condition - macro defined elsewhere in the file.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free ranges have no allocation handle; used ranges must have one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6872 
    6873 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6874 {
    6875  if(!m_FreeSuballocationsBySize.empty())
    6876  {
    6877  return m_FreeSuballocationsBySize.back()->size;
    6878  }
    6879  else
    6880  {
    6881  return 0;
    6882  }
    6883 }
    6884 
// The block is empty when its only suballocation is the single free range
// covering the whole block.
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
    6889 
// Fills outInfo with statistics for this single block: counts and byte totals
// from cached fields, min/max sizes from a pass over all suballocations.
// Min fields start at UINT64_MAX / max at 0 so empty categories stay sentinel.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
    6923 
// Accumulates this block's contribution into pool-wide statistics.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
    6934 
    6935 #if VMA_STATS_STRING_ENABLED
    6936 
    6937 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6938 {
    6939  PrintDetailedMap_Begin(json,
    6940  m_SumFreeSize, // unusedBytes
    6941  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6942  m_FreeCount); // unusedRangeCount
    6943 
    6944  size_t i = 0;
    6945  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6946  suballocItem != m_Suballocations.cend();
    6947  ++suballocItem, ++i)
    6948  {
    6949  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6950  {
    6951  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6952  }
    6953  else
    6954  {
    6955  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6956  }
    6957  }
    6958 
    6959  PrintDetailedMap_End(json);
    6960 }
    6961 
    6962 #endif // #if VMA_STATS_STRING_ENABLED
    6963 
    6964 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6965  uint32_t currentFrameIndex,
    6966  uint32_t frameInUseCount,
    6967  VkDeviceSize bufferImageGranularity,
    6968  VkDeviceSize allocSize,
    6969  VkDeviceSize allocAlignment,
    6970  bool upperAddress,
    6971  VmaSuballocationType allocType,
    6972  bool canMakeOtherLost,
    6973  uint32_t strategy,
    6974  VmaAllocationRequest* pAllocationRequest)
    6975 {
    6976  VMA_ASSERT(allocSize > 0);
    6977  VMA_ASSERT(!upperAddress);
    6978  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6979  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6980  VMA_HEAVY_ASSERT(Validate());
    6981 
    6982  // There is not enough total free space in this block to fullfill the request: Early return.
    6983  if(canMakeOtherLost == false &&
    6984  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6985  {
    6986  return false;
    6987  }
    6988 
    6989  // New algorithm, efficiently searching freeSuballocationsBySize.
    6990  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6991  if(freeSuballocCount > 0)
    6992  {
    6994  {
    6995  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6996  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6997  m_FreeSuballocationsBySize.data(),
    6998  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6999  allocSize + 2 * VMA_DEBUG_MARGIN,
    7000  VmaSuballocationItemSizeLess());
    7001  size_t index = it - m_FreeSuballocationsBySize.data();
    7002  for(; index < freeSuballocCount; ++index)
    7003  {
    7004  if(CheckAllocation(
    7005  currentFrameIndex,
    7006  frameInUseCount,
    7007  bufferImageGranularity,
    7008  allocSize,
    7009  allocAlignment,
    7010  allocType,
    7011  m_FreeSuballocationsBySize[index],
    7012  false, // canMakeOtherLost
    7013  &pAllocationRequest->offset,
    7014  &pAllocationRequest->itemsToMakeLostCount,
    7015  &pAllocationRequest->sumFreeSize,
    7016  &pAllocationRequest->sumItemSize))
    7017  {
    7018  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7019  return true;
    7020  }
    7021  }
    7022  }
    7023  else // WORST_FIT, FIRST_FIT
    7024  {
    7025  // Search staring from biggest suballocations.
    7026  for(size_t index = freeSuballocCount; index--; )
    7027  {
    7028  if(CheckAllocation(
    7029  currentFrameIndex,
    7030  frameInUseCount,
    7031  bufferImageGranularity,
    7032  allocSize,
    7033  allocAlignment,
    7034  allocType,
    7035  m_FreeSuballocationsBySize[index],
    7036  false, // canMakeOtherLost
    7037  &pAllocationRequest->offset,
    7038  &pAllocationRequest->itemsToMakeLostCount,
    7039  &pAllocationRequest->sumFreeSize,
    7040  &pAllocationRequest->sumItemSize))
    7041  {
    7042  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7043  return true;
    7044  }
    7045  }
    7046  }
    7047  }
    7048 
    7049  if(canMakeOtherLost)
    7050  {
    7051  // Brute-force algorithm. TODO: Come up with something better.
    7052 
    7053  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7054  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7055 
    7056  VmaAllocationRequest tmpAllocRequest = {};
    7057  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7058  suballocIt != m_Suballocations.end();
    7059  ++suballocIt)
    7060  {
    7061  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7062  suballocIt->hAllocation->CanBecomeLost())
    7063  {
    7064  if(CheckAllocation(
    7065  currentFrameIndex,
    7066  frameInUseCount,
    7067  bufferImageGranularity,
    7068  allocSize,
    7069  allocAlignment,
    7070  allocType,
    7071  suballocIt,
    7072  canMakeOtherLost,
    7073  &tmpAllocRequest.offset,
    7074  &tmpAllocRequest.itemsToMakeLostCount,
    7075  &tmpAllocRequest.sumFreeSize,
    7076  &tmpAllocRequest.sumItemSize))
    7077  {
    7078  tmpAllocRequest.item = suballocIt;
    7079 
    7080  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7082  {
    7083  *pAllocationRequest = tmpAllocRequest;
    7084  }
    7085  }
    7086  }
    7087  }
    7088 
    7089  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7090  {
    7091  return true;
    7092  }
    7093  }
    7094 
    7095  return false;
    7096 }
    7097 
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
    // walking forward from pAllocationRequest->item, so that the requested region
    // becomes a single free suballocation.
    // Returns false if any required allocation refuses to become lost.
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free suballocation to reach the next used one.
        // A single `if` suffices because FreeSuballocation merges adjacent free
        // items, so two free suballocations are never neighbors.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation returns the (possibly merged) free item;
            // continue the walk from there.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7129 
    7130 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7131 {
    7132  uint32_t lostAllocationCount = 0;
    7133  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7134  it != m_Suballocations.end();
    7135  ++it)
    7136  {
    7137  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7138  it->hAllocation->CanBecomeLost() &&
    7139  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7140  {
    7141  it = FreeSuballocation(it);
    7142  ++lostAllocationCount;
    7143  }
    7144  }
    7145  return lostAllocationCount;
    7146 }
    7147 
    7148 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7149 {
    7150  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7151  it != m_Suballocations.end();
    7152  ++it)
    7153  {
    7154  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7155  {
    7156  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7157  {
    7158  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7159  return VK_ERROR_VALIDATION_FAILED_EXT;
    7160  }
    7161  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7162  {
    7163  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7164  return VK_ERROR_VALIDATION_FAILED_EXT;
    7165  }
    7166  }
    7167  }
    7168 
    7169  return VK_SUCCESS;
    7170 }
    7171 
    7172 void VmaBlockMetadata_Generic::Alloc(
    7173  const VmaAllocationRequest& request,
    7174  VmaSuballocationType type,
    7175  VkDeviceSize allocSize,
    7176  bool upperAddress,
    7177  VmaAllocation hAllocation)
    7178 {
    7179  VMA_ASSERT(!upperAddress);
    7180  VMA_ASSERT(request.item != m_Suballocations.end());
    7181  VmaSuballocation& suballoc = *request.item;
    7182  // Given suballocation is a free block.
    7183  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7184  // Given offset is inside this suballocation.
    7185  VMA_ASSERT(request.offset >= suballoc.offset);
    7186  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    7187  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    7188  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    7189 
    7190  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    7191  // it to become used.
    7192  UnregisterFreeSuballocation(request.item);
    7193 
    7194  suballoc.offset = request.offset;
    7195  suballoc.size = allocSize;
    7196  suballoc.type = type;
    7197  suballoc.hAllocation = hAllocation;
    7198 
    7199  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    7200  if(paddingEnd)
    7201  {
    7202  VmaSuballocation paddingSuballoc = {};
    7203  paddingSuballoc.offset = request.offset + allocSize;
    7204  paddingSuballoc.size = paddingEnd;
    7205  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7206  VmaSuballocationList::iterator next = request.item;
    7207  ++next;
    7208  const VmaSuballocationList::iterator paddingEndItem =
    7209  m_Suballocations.insert(next, paddingSuballoc);
    7210  RegisterFreeSuballocation(paddingEndItem);
    7211  }
    7212 
    7213  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    7214  if(paddingBegin)
    7215  {
    7216  VmaSuballocation paddingSuballoc = {};
    7217  paddingSuballoc.offset = request.offset - paddingBegin;
    7218  paddingSuballoc.size = paddingBegin;
    7219  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7220  const VmaSuballocationList::iterator paddingBeginItem =
    7221  m_Suballocations.insert(request.item, paddingSuballoc);
    7222  RegisterFreeSuballocation(paddingBeginItem);
    7223  }
    7224 
    7225  // Update totals.
    7226  m_FreeCount = m_FreeCount - 1;
    7227  if(paddingBegin > 0)
    7228  {
    7229  ++m_FreeCount;
    7230  }
    7231  if(paddingEnd > 0)
    7232  {
    7233  ++m_FreeCount;
    7234  }
    7235  m_SumFreeSize -= allocSize;
    7236 }
    7237 
    7238 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7239 {
    7240  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7241  suballocItem != m_Suballocations.end();
    7242  ++suballocItem)
    7243  {
    7244  VmaSuballocation& suballoc = *suballocItem;
    7245  if(suballoc.hAllocation == allocation)
    7246  {
    7247  FreeSuballocation(suballocItem);
    7248  VMA_HEAVY_ASSERT(Validate());
    7249  return;
    7250  }
    7251  }
    7252  VMA_ASSERT(0 && "Not found!");
    7253 }
    7254 
    7255 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7256 {
    7257  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7258  suballocItem != m_Suballocations.end();
    7259  ++suballocItem)
    7260  {
    7261  VmaSuballocation& suballoc = *suballocItem;
    7262  if(suballoc.offset == offset)
    7263  {
    7264  FreeSuballocation(suballocItem);
    7265  return;
    7266  }
    7267  }
    7268  VMA_ASSERT(0 && "Not found!");
    7269 }
    7270 
// Tries to change the size of `alloc` in place to `newSize`, adjusting the
// free suballocation directly after it (growing, shrinking, creating, or
// removing it) as needed.
// Returns false when growing is impossible because the space immediately
// following the allocation is occupied or too small (including debug margin).
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister/re-register because resizing changes its position
                        // in the size-sorted m_FreeSuballocationsBySize vector.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7397 
    7398 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7399 {
    7400  VkDeviceSize lastSize = 0;
    7401  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7402  {
    7403  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7404 
    7405  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7406  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7407  VMA_VALIDATE(it->size >= lastSize);
    7408  lastSize = it->size;
    7409  }
    7410  return true;
    7411 }
    7412 
// Checks whether an allocation of (allocSize, allocAlignment, allocType) can be
// placed starting at the suballocation pointed to by suballocItem.
// On success fills *pOffset with the final aligned offset.
// When canMakeOtherLost is true, used suballocations that can become lost are
// counted into *itemsToMakeLostCount / *pSumItemSize instead of failing, and the
// search may span multiple consecutive suballocations; otherwise suballocItem
// must be a free suballocation large enough by itself.
// *pSumFreeSize / *pSumItemSize are filled to let the caller compare the cost of
// alternative placements.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // A used starting suballocation is acceptable only if it can become
            // lost and has not been used within the last frameInUseCount frames.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // canMakeOtherLost == false: the starting suballocation must itself be
        // a free suballocation large enough for the whole request.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7686 
    7687 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7688 {
    7689  VMA_ASSERT(item != m_Suballocations.end());
    7690  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7691 
    7692  VmaSuballocationList::iterator nextItem = item;
    7693  ++nextItem;
    7694  VMA_ASSERT(nextItem != m_Suballocations.end());
    7695  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7696 
    7697  item->size += nextItem->size;
    7698  --m_FreeCount;
    7699  m_Suballocations.erase(nextItem);
    7700 }
    7701 
// Converts the given used suballocation into a free one, merges it with any
// adjacent free suballocations, and returns an iterator to the resulting
// (possibly merged) free suballocation, already registered in
// m_FreeSuballocationsBySize.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // The next item is registered by size; unregister it before
        // MergeFreeWithNext erases it from the list.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // Merging changes the previous item's size, so it must be unregistered
        // and re-registered to keep m_FreeSuballocationsBySize sorted.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7753 
    7754 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7755 {
    7756  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7757  VMA_ASSERT(item->size > 0);
    7758 
    7759  // You may want to enable this validation at the beginning or at the end of
    7760  // this function, depending on what do you want to check.
    7761  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7762 
    7763  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7764  {
    7765  if(m_FreeSuballocationsBySize.empty())
    7766  {
    7767  m_FreeSuballocationsBySize.push_back(item);
    7768  }
    7769  else
    7770  {
    7771  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7772  }
    7773  }
    7774 
    7775  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7776 }
    7777 
    7778 
// Removes a free suballocation from the size-sorted m_FreeSuballocationsBySize
// vector. Items smaller than VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER were
// never registered, so nothing is removed for them.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary-search the first entry whose size is not less than item's,
        // then scan forward through the run of equal-sized entries to find the
        // exact iterator to remove.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Once the size differs, the item cannot appear later in the sorted vector.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7811 
    7813 // class VmaBlockMetadata_Linear
    7814 
// Constructs empty linear metadata: no suballocations, second vector unused.
// Both suballocation vectors allocate through the allocator's callbacks.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7827 
// Trivial destructor: member vectors release their storage via their own destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7831 
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    // Initialize base-class bookkeeping, then mark the entire block as free.
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7837 
// Validates all internal invariants of the linear metadata: consistency of the
// second-vector mode, null-item counters, per-suballocation offset/size
// agreement with the owning VmaAllocation, monotonically increasing offsets,
// and the m_SumFreeSize total. Returns true when everything checks out
// (VMA_VALIDATE returns false from this function on the first violation).
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lower part of the block,
    // so it is validated first with the running `offset`.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading entries of the 1st vector must all be null (freed) items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): the left side of this check is always true given the loop
        // bounds, so it effectively validates nothing - presumably a leftover;
        // kept byte-identical here.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the top of the
    // block, so it is iterated in reverse to keep `offset` increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7964 
    7965 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7966 {
    7967  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7968  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7969 }
    7970 
    7971 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7972 {
    7973  const VkDeviceSize size = GetSize();
    7974 
    7975  /*
    7976  We don't consider gaps inside allocation vectors with freed allocations because
    7977  they are not suitable for reuse in linear allocator. We consider only space that
    7978  is available for new allocations.
    7979  */
    7980  if(IsEmpty())
    7981  {
    7982  return size;
    7983  }
    7984 
    7985  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7986 
    7987  switch(m_2ndVectorMode)
    7988  {
    7989  case SECOND_VECTOR_EMPTY:
    7990  /*
    7991  Available space is after end of 1st, as well as before beginning of 1st (which
    7992  whould make it a ring buffer).
    7993  */
    7994  {
    7995  const size_t suballocations1stCount = suballocations1st.size();
    7996  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    7997  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    7998  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    7999  return VMA_MAX(
    8000  firstSuballoc.offset,
    8001  size - (lastSuballoc.offset + lastSuballoc.size));
    8002  }
    8003  break;
    8004 
    8005  case SECOND_VECTOR_RING_BUFFER:
    8006  /*
    8007  Available space is only between end of 2nd and beginning of 1st.
    8008  */
    8009  {
    8010  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8011  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8012  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8013  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8014  }
    8015  break;
    8016 
    8017  case SECOND_VECTOR_DOUBLE_STACK:
    8018  /*
    8019  Available space is only between end of 1st and top of 2nd.
    8020  */
    8021  {
    8022  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8023  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8024  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8025  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8026  }
    8027  break;
    8028 
    8029  default:
    8030  VMA_ASSERT(0);
    8031  return 0;
    8032  }
    8033 }
    8034 
    8035 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8036 {
    8037  const VkDeviceSize size = GetSize();
    8038  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8039  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8040  const size_t suballoc1stCount = suballocations1st.size();
    8041  const size_t suballoc2ndCount = suballocations2nd.size();
    8042 
    8043  outInfo.blockCount = 1;
    8044  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8045  outInfo.unusedRangeCount = 0;
    8046  outInfo.usedBytes = 0;
    8047  outInfo.allocationSizeMin = UINT64_MAX;
    8048  outInfo.allocationSizeMax = 0;
    8049  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8050  outInfo.unusedRangeSizeMax = 0;
    8051 
    8052  VkDeviceSize lastOffset = 0;
    8053 
    8054  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8055  {
    8056  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8057  size_t nextAlloc2ndIndex = 0;
    8058  while(lastOffset < freeSpace2ndTo1stEnd)
    8059  {
    8060  // Find next non-null allocation or move nextAllocIndex to the end.
    8061  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8062  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8063  {
    8064  ++nextAlloc2ndIndex;
    8065  }
    8066 
    8067  // Found non-null allocation.
    8068  if(nextAlloc2ndIndex < suballoc2ndCount)
    8069  {
    8070  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8071 
    8072  // 1. Process free space before this allocation.
    8073  if(lastOffset < suballoc.offset)
    8074  {
    8075  // There is free space from lastOffset to suballoc.offset.
    8076  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8077  ++outInfo.unusedRangeCount;
    8078  outInfo.unusedBytes += unusedRangeSize;
    8079  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8080  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8081  }
    8082 
    8083  // 2. Process this allocation.
    8084  // There is allocation with suballoc.offset, suballoc.size.
    8085  outInfo.usedBytes += suballoc.size;
    8086  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8087  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8088 
    8089  // 3. Prepare for next iteration.
    8090  lastOffset = suballoc.offset + suballoc.size;
    8091  ++nextAlloc2ndIndex;
    8092  }
    8093  // We are at the end.
    8094  else
    8095  {
    8096  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8097  if(lastOffset < freeSpace2ndTo1stEnd)
    8098  {
    8099  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8100  ++outInfo.unusedRangeCount;
    8101  outInfo.unusedBytes += unusedRangeSize;
    8102  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8103  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8104  }
    8105 
    8106  // End of loop.
    8107  lastOffset = freeSpace2ndTo1stEnd;
    8108  }
    8109  }
    8110  }
    8111 
    8112  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8113  const VkDeviceSize freeSpace1stTo2ndEnd =
    8114  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8115  while(lastOffset < freeSpace1stTo2ndEnd)
    8116  {
    8117  // Find next non-null allocation or move nextAllocIndex to the end.
    8118  while(nextAlloc1stIndex < suballoc1stCount &&
    8119  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8120  {
    8121  ++nextAlloc1stIndex;
    8122  }
    8123 
    8124  // Found non-null allocation.
    8125  if(nextAlloc1stIndex < suballoc1stCount)
    8126  {
    8127  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8128 
    8129  // 1. Process free space before this allocation.
    8130  if(lastOffset < suballoc.offset)
    8131  {
    8132  // There is free space from lastOffset to suballoc.offset.
    8133  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8134  ++outInfo.unusedRangeCount;
    8135  outInfo.unusedBytes += unusedRangeSize;
    8136  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8137  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8138  }
    8139 
    8140  // 2. Process this allocation.
    8141  // There is allocation with suballoc.offset, suballoc.size.
    8142  outInfo.usedBytes += suballoc.size;
    8143  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8144  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8145 
    8146  // 3. Prepare for next iteration.
    8147  lastOffset = suballoc.offset + suballoc.size;
    8148  ++nextAlloc1stIndex;
    8149  }
    8150  // We are at the end.
    8151  else
    8152  {
    8153  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8154  if(lastOffset < freeSpace1stTo2ndEnd)
    8155  {
    8156  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8157  ++outInfo.unusedRangeCount;
    8158  outInfo.unusedBytes += unusedRangeSize;
    8159  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8160  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8161  }
    8162 
    8163  // End of loop.
    8164  lastOffset = freeSpace1stTo2ndEnd;
    8165  }
    8166  }
    8167 
    8168  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8169  {
    8170  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8171  while(lastOffset < size)
    8172  {
    8173  // Find next non-null allocation or move nextAllocIndex to the end.
    8174  while(nextAlloc2ndIndex != SIZE_MAX &&
    8175  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8176  {
    8177  --nextAlloc2ndIndex;
    8178  }
    8179 
    8180  // Found non-null allocation.
    8181  if(nextAlloc2ndIndex != SIZE_MAX)
    8182  {
    8183  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8184 
    8185  // 1. Process free space before this allocation.
    8186  if(lastOffset < suballoc.offset)
    8187  {
    8188  // There is free space from lastOffset to suballoc.offset.
    8189  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8190  ++outInfo.unusedRangeCount;
    8191  outInfo.unusedBytes += unusedRangeSize;
    8192  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8193  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8194  }
    8195 
    8196  // 2. Process this allocation.
    8197  // There is allocation with suballoc.offset, suballoc.size.
    8198  outInfo.usedBytes += suballoc.size;
    8199  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8200  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8201 
    8202  // 3. Prepare for next iteration.
    8203  lastOffset = suballoc.offset + suballoc.size;
    8204  --nextAlloc2ndIndex;
    8205  }
    8206  // We are at the end.
    8207  else
    8208  {
    8209  // There is free space from lastOffset to size.
    8210  if(lastOffset < size)
    8211  {
    8212  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8213  ++outInfo.unusedRangeCount;
    8214  outInfo.unusedBytes += unusedRangeSize;
    8215  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8216  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8217  }
    8218 
    8219  // End of loop.
    8220  lastOffset = size;
    8221  }
    8222  }
    8223  }
    8224 
    8225  outInfo.unusedBytes = size - outInfo.usedBytes;
    8226 }
    8227 
    8228 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8229 {
    8230  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8231  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8232  const VkDeviceSize size = GetSize();
    8233  const size_t suballoc1stCount = suballocations1st.size();
    8234  const size_t suballoc2ndCount = suballocations2nd.size();
    8235 
    8236  inoutStats.size += size;
    8237 
    8238  VkDeviceSize lastOffset = 0;
    8239 
    8240  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8241  {
    8242  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8243  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8244  while(lastOffset < freeSpace2ndTo1stEnd)
    8245  {
    8246  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8247  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8248  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8249  {
    8250  ++nextAlloc2ndIndex;
    8251  }
    8252 
    8253  // Found non-null allocation.
    8254  if(nextAlloc2ndIndex < suballoc2ndCount)
    8255  {
    8256  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8257 
    8258  // 1. Process free space before this allocation.
    8259  if(lastOffset < suballoc.offset)
    8260  {
    8261  // There is free space from lastOffset to suballoc.offset.
    8262  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8263  inoutStats.unusedSize += unusedRangeSize;
    8264  ++inoutStats.unusedRangeCount;
    8265  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8266  }
    8267 
    8268  // 2. Process this allocation.
    8269  // There is allocation with suballoc.offset, suballoc.size.
    8270  ++inoutStats.allocationCount;
    8271 
    8272  // 3. Prepare for next iteration.
    8273  lastOffset = suballoc.offset + suballoc.size;
    8274  ++nextAlloc2ndIndex;
    8275  }
    8276  // We are at the end.
    8277  else
    8278  {
    8279  if(lastOffset < freeSpace2ndTo1stEnd)
    8280  {
    8281  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8282  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8283  inoutStats.unusedSize += unusedRangeSize;
    8284  ++inoutStats.unusedRangeCount;
    8285  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8286  }
    8287 
    8288  // End of loop.
    8289  lastOffset = freeSpace2ndTo1stEnd;
    8290  }
    8291  }
    8292  }
    8293 
    8294  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8295  const VkDeviceSize freeSpace1stTo2ndEnd =
    8296  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8297  while(lastOffset < freeSpace1stTo2ndEnd)
    8298  {
    8299  // Find next non-null allocation or move nextAllocIndex to the end.
    8300  while(nextAlloc1stIndex < suballoc1stCount &&
    8301  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8302  {
    8303  ++nextAlloc1stIndex;
    8304  }
    8305 
    8306  // Found non-null allocation.
    8307  if(nextAlloc1stIndex < suballoc1stCount)
    8308  {
    8309  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8310 
    8311  // 1. Process free space before this allocation.
    8312  if(lastOffset < suballoc.offset)
    8313  {
    8314  // There is free space from lastOffset to suballoc.offset.
    8315  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8316  inoutStats.unusedSize += unusedRangeSize;
    8317  ++inoutStats.unusedRangeCount;
    8318  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8319  }
    8320 
    8321  // 2. Process this allocation.
    8322  // There is allocation with suballoc.offset, suballoc.size.
    8323  ++inoutStats.allocationCount;
    8324 
    8325  // 3. Prepare for next iteration.
    8326  lastOffset = suballoc.offset + suballoc.size;
    8327  ++nextAlloc1stIndex;
    8328  }
    8329  // We are at the end.
    8330  else
    8331  {
    8332  if(lastOffset < freeSpace1stTo2ndEnd)
    8333  {
    8334  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8335  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8336  inoutStats.unusedSize += unusedRangeSize;
    8337  ++inoutStats.unusedRangeCount;
    8338  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8339  }
    8340 
    8341  // End of loop.
    8342  lastOffset = freeSpace1stTo2ndEnd;
    8343  }
    8344  }
    8345 
    8346  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8347  {
    8348  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8349  while(lastOffset < size)
    8350  {
    8351  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8352  while(nextAlloc2ndIndex != SIZE_MAX &&
    8353  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8354  {
    8355  --nextAlloc2ndIndex;
    8356  }
    8357 
    8358  // Found non-null allocation.
    8359  if(nextAlloc2ndIndex != SIZE_MAX)
    8360  {
    8361  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8362 
    8363  // 1. Process free space before this allocation.
    8364  if(lastOffset < suballoc.offset)
    8365  {
    8366  // There is free space from lastOffset to suballoc.offset.
    8367  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8368  inoutStats.unusedSize += unusedRangeSize;
    8369  ++inoutStats.unusedRangeCount;
    8370  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8371  }
    8372 
    8373  // 2. Process this allocation.
    8374  // There is allocation with suballoc.offset, suballoc.size.
    8375  ++inoutStats.allocationCount;
    8376 
    8377  // 3. Prepare for next iteration.
    8378  lastOffset = suballoc.offset + suballoc.size;
    8379  --nextAlloc2ndIndex;
    8380  }
    8381  // We are at the end.
    8382  else
    8383  {
    8384  if(lastOffset < size)
    8385  {
    8386  // There is free space from lastOffset to size.
    8387  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8388  inoutStats.unusedSize += unusedRangeSize;
    8389  ++inoutStats.unusedRangeCount;
    8390  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8391  }
    8392 
    8393  // End of loop.
    8394  lastOffset = size;
    8395  }
    8396  }
    8397  }
    8398 }
    8399 
    8400 #if VMA_STATS_STRING_ENABLED
    8401 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8402 {
    8403  const VkDeviceSize size = GetSize();
    8404  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8405  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8406  const size_t suballoc1stCount = suballocations1st.size();
    8407  const size_t suballoc2ndCount = suballocations2nd.size();
    8408 
    8409  // FIRST PASS
    8410 
    8411  size_t unusedRangeCount = 0;
    8412  VkDeviceSize usedBytes = 0;
    8413 
    8414  VkDeviceSize lastOffset = 0;
    8415 
    8416  size_t alloc2ndCount = 0;
    8417  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8418  {
    8419  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8420  size_t nextAlloc2ndIndex = 0;
    8421  while(lastOffset < freeSpace2ndTo1stEnd)
    8422  {
    8423  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8424  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8425  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8426  {
    8427  ++nextAlloc2ndIndex;
    8428  }
    8429 
    8430  // Found non-null allocation.
    8431  if(nextAlloc2ndIndex < suballoc2ndCount)
    8432  {
    8433  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8434 
    8435  // 1. Process free space before this allocation.
    8436  if(lastOffset < suballoc.offset)
    8437  {
    8438  // There is free space from lastOffset to suballoc.offset.
    8439  ++unusedRangeCount;
    8440  }
    8441 
    8442  // 2. Process this allocation.
    8443  // There is allocation with suballoc.offset, suballoc.size.
    8444  ++alloc2ndCount;
    8445  usedBytes += suballoc.size;
    8446 
    8447  // 3. Prepare for next iteration.
    8448  lastOffset = suballoc.offset + suballoc.size;
    8449  ++nextAlloc2ndIndex;
    8450  }
    8451  // We are at the end.
    8452  else
    8453  {
    8454  if(lastOffset < freeSpace2ndTo1stEnd)
    8455  {
    8456  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8457  ++unusedRangeCount;
    8458  }
    8459 
    8460  // End of loop.
    8461  lastOffset = freeSpace2ndTo1stEnd;
    8462  }
    8463  }
    8464  }
    8465 
    8466  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8467  size_t alloc1stCount = 0;
    8468  const VkDeviceSize freeSpace1stTo2ndEnd =
    8469  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8470  while(lastOffset < freeSpace1stTo2ndEnd)
    8471  {
    8472  // Find next non-null allocation or move nextAllocIndex to the end.
    8473  while(nextAlloc1stIndex < suballoc1stCount &&
    8474  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8475  {
    8476  ++nextAlloc1stIndex;
    8477  }
    8478 
    8479  // Found non-null allocation.
    8480  if(nextAlloc1stIndex < suballoc1stCount)
    8481  {
    8482  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8483 
    8484  // 1. Process free space before this allocation.
    8485  if(lastOffset < suballoc.offset)
    8486  {
    8487  // There is free space from lastOffset to suballoc.offset.
    8488  ++unusedRangeCount;
    8489  }
    8490 
    8491  // 2. Process this allocation.
    8492  // There is allocation with suballoc.offset, suballoc.size.
    8493  ++alloc1stCount;
    8494  usedBytes += suballoc.size;
    8495 
    8496  // 3. Prepare for next iteration.
    8497  lastOffset = suballoc.offset + suballoc.size;
    8498  ++nextAlloc1stIndex;
    8499  }
    8500  // We are at the end.
    8501  else
    8502  {
    8503  if(lastOffset < size)
    8504  {
    8505  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8506  ++unusedRangeCount;
    8507  }
    8508 
    8509  // End of loop.
    8510  lastOffset = freeSpace1stTo2ndEnd;
    8511  }
    8512  }
    8513 
    8514  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8515  {
    8516  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8517  while(lastOffset < size)
    8518  {
    8519  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8520  while(nextAlloc2ndIndex != SIZE_MAX &&
    8521  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8522  {
    8523  --nextAlloc2ndIndex;
    8524  }
    8525 
    8526  // Found non-null allocation.
    8527  if(nextAlloc2ndIndex != SIZE_MAX)
    8528  {
    8529  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8530 
    8531  // 1. Process free space before this allocation.
    8532  if(lastOffset < suballoc.offset)
    8533  {
    8534  // There is free space from lastOffset to suballoc.offset.
    8535  ++unusedRangeCount;
    8536  }
    8537 
    8538  // 2. Process this allocation.
    8539  // There is allocation with suballoc.offset, suballoc.size.
    8540  ++alloc2ndCount;
    8541  usedBytes += suballoc.size;
    8542 
    8543  // 3. Prepare for next iteration.
    8544  lastOffset = suballoc.offset + suballoc.size;
    8545  --nextAlloc2ndIndex;
    8546  }
    8547  // We are at the end.
    8548  else
    8549  {
    8550  if(lastOffset < size)
    8551  {
    8552  // There is free space from lastOffset to size.
    8553  ++unusedRangeCount;
    8554  }
    8555 
    8556  // End of loop.
    8557  lastOffset = size;
    8558  }
    8559  }
    8560  }
    8561 
    8562  const VkDeviceSize unusedBytes = size - usedBytes;
    8563  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8564 
    8565  // SECOND PASS
    8566  lastOffset = 0;
    8567 
    8568  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8569  {
    8570  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8571  size_t nextAlloc2ndIndex = 0;
    8572  while(lastOffset < freeSpace2ndTo1stEnd)
    8573  {
    8574  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8575  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8576  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8577  {
    8578  ++nextAlloc2ndIndex;
    8579  }
    8580 
    8581  // Found non-null allocation.
    8582  if(nextAlloc2ndIndex < suballoc2ndCount)
    8583  {
    8584  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8585 
    8586  // 1. Process free space before this allocation.
    8587  if(lastOffset < suballoc.offset)
    8588  {
    8589  // There is free space from lastOffset to suballoc.offset.
    8590  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8591  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8592  }
    8593 
    8594  // 2. Process this allocation.
    8595  // There is allocation with suballoc.offset, suballoc.size.
    8596  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8597 
    8598  // 3. Prepare for next iteration.
    8599  lastOffset = suballoc.offset + suballoc.size;
    8600  ++nextAlloc2ndIndex;
    8601  }
    8602  // We are at the end.
    8603  else
    8604  {
    8605  if(lastOffset < freeSpace2ndTo1stEnd)
    8606  {
    8607  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8608  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8609  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8610  }
    8611 
    8612  // End of loop.
    8613  lastOffset = freeSpace2ndTo1stEnd;
    8614  }
    8615  }
    8616  }
    8617 
    8618  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8619  while(lastOffset < freeSpace1stTo2ndEnd)
    8620  {
    8621  // Find next non-null allocation or move nextAllocIndex to the end.
    8622  while(nextAlloc1stIndex < suballoc1stCount &&
    8623  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8624  {
    8625  ++nextAlloc1stIndex;
    8626  }
    8627 
    8628  // Found non-null allocation.
    8629  if(nextAlloc1stIndex < suballoc1stCount)
    8630  {
    8631  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8632 
    8633  // 1. Process free space before this allocation.
    8634  if(lastOffset < suballoc.offset)
    8635  {
    8636  // There is free space from lastOffset to suballoc.offset.
    8637  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8638  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8639  }
    8640 
    8641  // 2. Process this allocation.
    8642  // There is allocation with suballoc.offset, suballoc.size.
    8643  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8644 
    8645  // 3. Prepare for next iteration.
    8646  lastOffset = suballoc.offset + suballoc.size;
    8647  ++nextAlloc1stIndex;
    8648  }
    8649  // We are at the end.
    8650  else
    8651  {
    8652  if(lastOffset < freeSpace1stTo2ndEnd)
    8653  {
    8654  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8655  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8656  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8657  }
    8658 
    8659  // End of loop.
    8660  lastOffset = freeSpace1stTo2ndEnd;
    8661  }
    8662  }
    8663 
    8664  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8665  {
    8666  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8667  while(lastOffset < size)
    8668  {
    8669  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8670  while(nextAlloc2ndIndex != SIZE_MAX &&
    8671  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8672  {
    8673  --nextAlloc2ndIndex;
    8674  }
    8675 
    8676  // Found non-null allocation.
    8677  if(nextAlloc2ndIndex != SIZE_MAX)
    8678  {
    8679  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8680 
    8681  // 1. Process free space before this allocation.
    8682  if(lastOffset < suballoc.offset)
    8683  {
    8684  // There is free space from lastOffset to suballoc.offset.
    8685  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8686  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8687  }
    8688 
    8689  // 2. Process this allocation.
    8690  // There is allocation with suballoc.offset, suballoc.size.
    8691  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8692 
    8693  // 3. Prepare for next iteration.
    8694  lastOffset = suballoc.offset + suballoc.size;
    8695  --nextAlloc2ndIndex;
    8696  }
    8697  // We are at the end.
    8698  else
    8699  {
    8700  if(lastOffset < size)
    8701  {
    8702  // There is free space from lastOffset to size.
    8703  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8704  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8705  }
    8706 
    8707  // End of loop.
    8708  lastOffset = size;
    8709  }
    8710  }
    8711  }
    8712 
    8713  PrintDetailedMap_End(json);
    8714 }
    8715 #endif // #if VMA_STATS_STRING_ENABLED
    8716 
    8717 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8718  uint32_t currentFrameIndex,
    8719  uint32_t frameInUseCount,
    8720  VkDeviceSize bufferImageGranularity,
    8721  VkDeviceSize allocSize,
    8722  VkDeviceSize allocAlignment,
    8723  bool upperAddress,
    8724  VmaSuballocationType allocType,
    8725  bool canMakeOtherLost,
    8726  uint32_t strategy,
    8727  VmaAllocationRequest* pAllocationRequest)
    8728 {
    8729  VMA_ASSERT(allocSize > 0);
    8730  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8731  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8732  VMA_HEAVY_ASSERT(Validate());
    8733 
    8734  const VkDeviceSize size = GetSize();
    8735  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8736  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8737 
    8738  if(upperAddress)
    8739  {
    8740  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8741  {
    8742  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8743  return false;
    8744  }
    8745 
    8746  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8747  if(allocSize > size)
    8748  {
    8749  return false;
    8750  }
    8751  VkDeviceSize resultBaseOffset = size - allocSize;
    8752  if(!suballocations2nd.empty())
    8753  {
    8754  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8755  resultBaseOffset = lastSuballoc.offset - allocSize;
    8756  if(allocSize > lastSuballoc.offset)
    8757  {
    8758  return false;
    8759  }
    8760  }
    8761 
    8762  // Start from offset equal to end of free space.
    8763  VkDeviceSize resultOffset = resultBaseOffset;
    8764 
    8765  // Apply VMA_DEBUG_MARGIN at the end.
    8766  if(VMA_DEBUG_MARGIN > 0)
    8767  {
    8768  if(resultOffset < VMA_DEBUG_MARGIN)
    8769  {
    8770  return false;
    8771  }
    8772  resultOffset -= VMA_DEBUG_MARGIN;
    8773  }
    8774 
    8775  // Apply alignment.
    8776  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8777 
    8778  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8779  // Make bigger alignment if necessary.
    8780  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8781  {
    8782  bool bufferImageGranularityConflict = false;
    8783  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8784  {
    8785  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8786  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8787  {
    8788  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8789  {
    8790  bufferImageGranularityConflict = true;
    8791  break;
    8792  }
    8793  }
    8794  else
    8795  // Already on previous page.
    8796  break;
    8797  }
    8798  if(bufferImageGranularityConflict)
    8799  {
    8800  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8801  }
    8802  }
    8803 
    8804  // There is enough free space.
    8805  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8806  suballocations1st.back().offset + suballocations1st.back().size :
    8807  0;
    8808  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8809  {
    8810  // Check previous suballocations for BufferImageGranularity conflicts.
    8811  // If conflict exists, allocation cannot be made here.
    8812  if(bufferImageGranularity > 1)
    8813  {
    8814  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8815  {
    8816  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8817  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8818  {
    8819  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8820  {
    8821  return false;
    8822  }
    8823  }
    8824  else
    8825  {
    8826  // Already on next page.
    8827  break;
    8828  }
    8829  }
    8830  }
    8831 
    8832  // All tests passed: Success.
    8833  pAllocationRequest->offset = resultOffset;
    8834  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8835  pAllocationRequest->sumItemSize = 0;
    8836  // pAllocationRequest->item unused.
    8837  pAllocationRequest->itemsToMakeLostCount = 0;
    8838  return true;
    8839  }
    8840  }
    8841  else // !upperAddress
    8842  {
    8843  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8844  {
    8845  // Try to allocate at the end of 1st vector.
    8846 
    8847  VkDeviceSize resultBaseOffset = 0;
    8848  if(!suballocations1st.empty())
    8849  {
    8850  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8851  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8852  }
    8853 
    8854  // Start from offset equal to beginning of free space.
    8855  VkDeviceSize resultOffset = resultBaseOffset;
    8856 
    8857  // Apply VMA_DEBUG_MARGIN at the beginning.
    8858  if(VMA_DEBUG_MARGIN > 0)
    8859  {
    8860  resultOffset += VMA_DEBUG_MARGIN;
    8861  }
    8862 
    8863  // Apply alignment.
    8864  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8865 
    8866  // Check previous suballocations for BufferImageGranularity conflicts.
    8867  // Make bigger alignment if necessary.
    8868  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8869  {
    8870  bool bufferImageGranularityConflict = false;
    8871  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8872  {
    8873  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8874  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8875  {
    8876  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8877  {
    8878  bufferImageGranularityConflict = true;
    8879  break;
    8880  }
    8881  }
    8882  else
    8883  // Already on previous page.
    8884  break;
    8885  }
    8886  if(bufferImageGranularityConflict)
    8887  {
    8888  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8889  }
    8890  }
    8891 
    8892  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8893  suballocations2nd.back().offset : size;
    8894 
    8895  // There is enough free space at the end after alignment.
    8896  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8897  {
    8898  // Check next suballocations for BufferImageGranularity conflicts.
    8899  // If conflict exists, allocation cannot be made here.
    8900  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8901  {
    8902  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8903  {
    8904  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8905  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8906  {
    8907  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8908  {
    8909  return false;
    8910  }
    8911  }
    8912  else
    8913  {
    8914  // Already on previous page.
    8915  break;
    8916  }
    8917  }
    8918  }
    8919 
    8920  // All tests passed: Success.
    8921  pAllocationRequest->offset = resultOffset;
    8922  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8923  pAllocationRequest->sumItemSize = 0;
    8924  // pAllocationRequest->item unused.
    8925  pAllocationRequest->itemsToMakeLostCount = 0;
    8926  return true;
    8927  }
    8928  }
    8929 
    8930  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8931  // beginning of 1st vector as the end of free space.
    8932  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8933  {
    8934  VMA_ASSERT(!suballocations1st.empty());
    8935 
    8936  VkDeviceSize resultBaseOffset = 0;
    8937  if(!suballocations2nd.empty())
    8938  {
    8939  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8940  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8941  }
    8942 
    8943  // Start from offset equal to beginning of free space.
    8944  VkDeviceSize resultOffset = resultBaseOffset;
    8945 
    8946  // Apply VMA_DEBUG_MARGIN at the beginning.
    8947  if(VMA_DEBUG_MARGIN > 0)
    8948  {
    8949  resultOffset += VMA_DEBUG_MARGIN;
    8950  }
    8951 
    8952  // Apply alignment.
    8953  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8954 
    8955  // Check previous suballocations for BufferImageGranularity conflicts.
    8956  // Make bigger alignment if necessary.
    8957  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8958  {
    8959  bool bufferImageGranularityConflict = false;
    8960  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8961  {
    8962  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8963  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8964  {
    8965  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8966  {
    8967  bufferImageGranularityConflict = true;
    8968  break;
    8969  }
    8970  }
    8971  else
    8972  // Already on previous page.
    8973  break;
    8974  }
    8975  if(bufferImageGranularityConflict)
    8976  {
    8977  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8978  }
    8979  }
    8980 
    8981  pAllocationRequest->itemsToMakeLostCount = 0;
    8982  pAllocationRequest->sumItemSize = 0;
    8983  size_t index1st = m_1stNullItemsBeginCount;
    8984 
    8985  if(canMakeOtherLost)
    8986  {
    8987  while(index1st < suballocations1st.size() &&
    8988  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8989  {
    8990  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8991  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8992  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8993  {
    8994  // No problem.
    8995  }
    8996  else
    8997  {
    8998  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    8999  if(suballoc.hAllocation->CanBecomeLost() &&
    9000  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9001  {
    9002  ++pAllocationRequest->itemsToMakeLostCount;
    9003  pAllocationRequest->sumItemSize += suballoc.size;
    9004  }
    9005  else
    9006  {
    9007  return false;
    9008  }
    9009  }
    9010  ++index1st;
    9011  }
    9012 
    9013  // Check next suballocations for BufferImageGranularity conflicts.
    9014  // If conflict exists, we must mark more allocations lost or fail.
    9015  if(bufferImageGranularity > 1)
    9016  {
    9017  while(index1st < suballocations1st.size())
    9018  {
    9019  const VmaSuballocation& suballoc = suballocations1st[index1st];
    9020  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    9021  {
    9022  if(suballoc.hAllocation != VK_NULL_HANDLE)
    9023  {
    9024  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    9025  if(suballoc.hAllocation->CanBecomeLost() &&
    9026  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9027  {
    9028  ++pAllocationRequest->itemsToMakeLostCount;
    9029  pAllocationRequest->sumItemSize += suballoc.size;
    9030  }
    9031  else
    9032  {
    9033  return false;
    9034  }
    9035  }
    9036  }
    9037  else
    9038  {
    9039  // Already on next page.
    9040  break;
    9041  }
    9042  ++index1st;
    9043  }
    9044  }
    9045  }
    9046 
    9047  // There is enough free space at the end after alignment.
    9048  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    9049  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    9050  {
    9051  // Check next suballocations for BufferImageGranularity conflicts.
    9052  // If conflict exists, allocation cannot be made here.
    9053  if(bufferImageGranularity > 1)
    9054  {
    9055  for(size_t nextSuballocIndex = index1st;
    9056  nextSuballocIndex < suballocations1st.size();
    9057  nextSuballocIndex++)
    9058  {
    9059  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    9060  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9061  {
    9062  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9063  {
    9064  return false;
    9065  }
    9066  }
    9067  else
    9068  {
    9069  // Already on next page.
    9070  break;
    9071  }
    9072  }
    9073  }
    9074 
    9075  // All tests passed: Success.
    9076  pAllocationRequest->offset = resultOffset;
    9077  pAllocationRequest->sumFreeSize =
    9078  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    9079  - resultBaseOffset
    9080  - pAllocationRequest->sumItemSize;
    9081  // pAllocationRequest->item unused.
    9082  return true;
    9083  }
    9084  }
    9085  }
    9086 
    9087  return false;
    9088 }
    9089 
// Makes lost the allocations that were counted in
// pAllocationRequest->itemsToMakeLostCount by a prior CreateAllocationRequest(),
// so that the found request can actually be carved out.
// Scans the 1st vector starting at the first non-null item and frees colliding
// allocations in order. Returns false if any allocation refuses MakeLost();
// note that items already made lost before the failure stay freed (no rollback).
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        // Nothing collided with the request - trivially done.
        return true;
    }

    // Making allocations lost is meaningful only for the ring-buffer usage
    // pattern (or when the 2nd vector is not used at all).
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // Items that are already free don't count toward itemsToMakeLostCount.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            // CreateAllocationRequest() already verified these can become lost.
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the suballocation into a null (free) middle item and
                // keep the free-size accounting in sync.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    9134 
    9135 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9136 {
    9137  uint32_t lostAllocationCount = 0;
    9138 
    9139  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9140  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9141  {
    9142  VmaSuballocation& suballoc = suballocations1st[i];
    9143  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9144  suballoc.hAllocation->CanBecomeLost() &&
    9145  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9146  {
    9147  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9148  suballoc.hAllocation = VK_NULL_HANDLE;
    9149  ++m_1stNullItemsMiddleCount;
    9150  m_SumFreeSize += suballoc.size;
    9151  ++lostAllocationCount;
    9152  }
    9153  }
    9154 
    9155  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9156  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9157  {
    9158  VmaSuballocation& suballoc = suballocations2nd[i];
    9159  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9160  suballoc.hAllocation->CanBecomeLost() &&
    9161  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9162  {
    9163  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9164  suballoc.hAllocation = VK_NULL_HANDLE;
    9165  ++m_2ndNullItemsCount;
    9166  ++lostAllocationCount;
    9167  }
    9168  }
    9169 
    9170  if(lostAllocationCount)
    9171  {
    9172  CleanupAfterFree();
    9173  }
    9174 
    9175  return lostAllocationCount;
    9176 }
    9177 
    9178 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9179 {
    9180  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9181  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9182  {
    9183  const VmaSuballocation& suballoc = suballocations1st[i];
    9184  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9185  {
    9186  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9187  {
    9188  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9189  return VK_ERROR_VALIDATION_FAILED_EXT;
    9190  }
    9191  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9192  {
    9193  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9194  return VK_ERROR_VALIDATION_FAILED_EXT;
    9195  }
    9196  }
    9197  }
    9198 
    9199  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9200  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9201  {
    9202  const VmaSuballocation& suballoc = suballocations2nd[i];
    9203  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9204  {
    9205  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9206  {
    9207  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9208  return VK_ERROR_VALIDATION_FAILED_EXT;
    9209  }
    9210  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9211  {
    9212  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9213  return VK_ERROR_VALIDATION_FAILED_EXT;
    9214  }
    9215  }
    9216  }
    9217 
    9218  return VK_SUCCESS;
    9219 }
    9220 
// Commits an allocation previously found by CreateAllocationRequest() at
// request.offset. Depending on where the offset falls, the new suballocation
// is appended to the 1st vector (end of linear stream), or to the 2nd vector
// (upper stack when upperAddress, or 2-part ring buffer otherwise).
// Switches m_2ndVectorMode as a side effect when the 2nd vector is first used.
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Double-stack usage is mutually exclusive with ring-buffer usage of
        // the 2nd vector for the lifetime of this block.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // request.offset matched neither valid placement - the request
                // did not come from CreateAllocationRequest() on this state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    // Keep aggregate free-size accounting in sync.
    m_SumFreeSize -= newSuballoc.size;
}
    9290 
    9291 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9292 {
    9293  FreeAtOffset(allocation->GetOffset());
    9294 }
    9295 
// Frees the suballocation that starts at the given offset.
// Cheap O(1) special cases are tried first (first item of the 1st vector,
// last item of the 2nd or 1st vector), then binary search is used for items
// in the middle of either vector. Every successful path updates m_SumFreeSize
// and calls CleanupAfterFree() to restore invariants. Asserts if the offset
// does not match any live suballocation.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The 1st vector is sorted by offset, so a binary search applies.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as a null middle item; the vector entry itself stays.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps 2nd vector sorted ascending by offset; double stack
        // keeps it sorted descending, hence the different comparator.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9384 
    9385 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9386 {
    9387  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9388  const size_t suballocCount = AccessSuballocations1st().size();
    9389  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9390 }
    9391 
// Restores internal invariants after one or more items were marked free:
// trims null items from the edges of both vectors, optionally compacts the
// 1st vector (see ShouldCompact1st()), resets the 2nd-vector mode when that
// vector empties, and when the 1st vector empties while a ring buffer is in
// use, promotes the 2nd vector to become the new 1st.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations left - reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // (Middle nulls that became leading nulls are reclassified.)
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Shift all non-null items of the 1st vector to the front,
            // dropping every null item in the process.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // The 2nd vector's nulls become the new 1st vector's nulls;
                // leading nulls are then reclassified as begin-count items.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which internal vector plays the role of "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9488 
    9489 
    9491 // class VmaBlockMetadata_Buddy
    9492 
    9493 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    9494  VmaBlockMetadata(hAllocator),
    9495  m_Root(VMA_NULL),
    9496  m_AllocationCount(0),
    9497  m_FreeCount(1),
    9498  m_SumFreeSize(0)
    9499 {
    9500  memset(m_FreeList, 0, sizeof(m_FreeList));
    9501 }
    9502 
// Releases the whole node tree starting at the root.
// DeleteNode is defined elsewhere; presumably it destroys children
// recursively - confirm against its definition.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9507 
    9508 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9509 {
    9510  VmaBlockMetadata::Init(size);
    9511 
    9512  m_UsableSize = VmaPrevPow2(size);
    9513  m_SumFreeSize = m_UsableSize;
    9514 
    9515  // Calculate m_LevelCount.
    9516  m_LevelCount = 1;
    9517  while(m_LevelCount < MAX_LEVELS &&
    9518  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9519  {
    9520  ++m_LevelCount;
    9521  }
    9522 
    9523  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9524  rootNode->offset = 0;
    9525  rootNode->type = Node::TYPE_FREE;
    9526  rootNode->parent = VMA_NULL;
    9527  rootNode->buddy = VMA_NULL;
    9528 
    9529  m_Root = rootNode;
    9530  AddToFreeListFront(0, rootNode);
    9531 }
    9532 
// Checks internal consistency of the whole buddy tree and of the per-level
// free lists. Returns true when everything is consistent; each individual
// check goes through VMA_VALIDATE, which reports and returns false on failure.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Counters accumulated while walking the tree must match the cached members.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // The head of a non-empty list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Last node must be the list's recorded back pointer.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Doubly-linked list back-pointers must be consistent.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9575 
    9576 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9577 {
    9578  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9579  {
    9580  if(m_FreeList[level].front != VMA_NULL)
    9581  {
    9582  return LevelToNodeSize(level);
    9583  }
    9584  }
    9585  return 0;
    9586 }
    9587 
// Fills outInfo with detailed statistics for this single block by walking the
// buddy tree, then accounts for the unusable tail (block size minus the
// power-of-two usable size) as one extra unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    // Start from neutral values; CalcAllocationStatInfoNode accumulates into them.
    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    // Recursively accumulate stats over the whole tree, starting at the root.
    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        // The unusable tail is reported as an additional unused range.
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9611 
    9612 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9613 {
    9614  const VkDeviceSize unusableSize = GetUnusableSize();
    9615 
    9616  inoutStats.size += GetSize();
    9617  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9618  inoutStats.allocationCount += m_AllocationCount;
    9619  inoutStats.unusedRangeCount += m_FreeCount;
    9620  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9621 
    9622  if(unusableSize > 0)
    9623  {
    9624  ++inoutStats.unusedRangeCount;
    9625  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9626  }
    9627 }
    9628 
    9629 #if VMA_STATS_STRING_ENABLED
    9630 
// Serializes this block's layout as JSON for statistics dumps.
// Currently computes full stats up front just to obtain the header numbers,
// then walks the tree again to emit each node; see TODO below.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    // Emit every node of the tree, starting from the root at level-0 size.
    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    // The unusable tail past the power-of-two usable size is reported as one
    // final unused range.
    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9655 
    9656 #endif // #if VMA_STATS_STRING_ENABLED
    9657 
// Finds a free node able to hold allocSize with allocAlignment and fills
// *pAllocationRequest. Returns false when no suitable node exists.
// currentFrameIndex, frameInUseCount, canMakeOtherLost and strategy are
// effectively ignored: the buddy allocator does not support lost allocations,
// and no allocation strategy beyond taking the first suitably-aligned free
// node is implemented here.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        // Inflating both alignment and size guarantees no linear/optimal
        // resource can share a granularity page with this allocation.
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // targetLevel is the deepest level whose node size still fits allocSize.
    // Search from that level up toward level 0 (larger nodes), so the
    // smallest adequate free node is preferred.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            // Node offsets are multiples of the node size; only an explicit
            // alignment check is needed.
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // The chosen level is carried to Alloc() through customData.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9708 
    9709 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9710  uint32_t currentFrameIndex,
    9711  uint32_t frameInUseCount,
    9712  VmaAllocationRequest* pAllocationRequest)
    9713 {
    9714  /*
    9715  Lost allocations are not supported in buddy allocator at the moment.
    9716  Support might be added in the future.
    9717  */
    9718  return pAllocationRequest->itemsToMakeLostCount == 0;
    9719 }
    9720 
    9721 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9722 {
    9723  /*
    9724  Lost allocations are not supported in buddy allocator at the moment.
    9725  Support might be added in the future.
    9726  */
    9727  return 0;
    9728 }
    9729 
/*
Commits an allocation previously found by CreateAllocationRequest().
request.customData carries the level of the free node chosen there; that node
is split repeatedly (classic buddy scheme) until a node of exactly targetLevel
size remains, which is then converted into an allocation node.
*/
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node at currLevel that has the requested offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // rightChild is pushed first so that leftChild ends up at the front
        // and is picked as the next currNode below.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9804 
    9805 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9806 {
    9807  if(node->type == Node::TYPE_SPLIT)
    9808  {
    9809  DeleteNode(node->split.leftChild->buddy);
    9810  DeleteNode(node->split.leftChild);
    9811  }
    9812 
    9813  vma_delete(GetAllocationCallbacks(), node);
    9814 }
    9815 
    9816 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9817 {
    9818  VMA_VALIDATE(level < m_LevelCount);
    9819  VMA_VALIDATE(curr->parent == parent);
    9820  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9821  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9822  switch(curr->type)
    9823  {
    9824  case Node::TYPE_FREE:
    9825  // curr->free.prev, next are validated separately.
    9826  ctx.calculatedSumFreeSize += levelNodeSize;
    9827  ++ctx.calculatedFreeCount;
    9828  break;
    9829  case Node::TYPE_ALLOCATION:
    9830  ++ctx.calculatedAllocationCount;
    9831  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9832  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9833  break;
    9834  case Node::TYPE_SPLIT:
    9835  {
    9836  const uint32_t childrenLevel = level + 1;
    9837  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9838  const Node* const leftChild = curr->split.leftChild;
    9839  VMA_VALIDATE(leftChild != VMA_NULL);
    9840  VMA_VALIDATE(leftChild->offset == curr->offset);
    9841  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9842  {
    9843  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9844  }
    9845  const Node* const rightChild = leftChild->buddy;
    9846  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9847  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9848  {
    9849  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9850  }
    9851  }
    9852  break;
    9853  default:
    9854  return false;
    9855  }
    9856 
    9857  return true;
    9858 }
    9859 
    9860 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9861 {
    9862  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9863  uint32_t level = 0;
    9864  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9865  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9866  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9867  {
    9868  ++level;
    9869  currLevelNodeSize = nextLevelNodeSize;
    9870  nextLevelNodeSize = currLevelNodeSize >> 1;
    9871  }
    9872  return level;
    9873 }
    9874 
    9875 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    9876 {
    9877  // Find node and level.
    9878  Node* node = m_Root;
    9879  VkDeviceSize nodeOffset = 0;
    9880  uint32_t level = 0;
    9881  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    9882  while(node->type == Node::TYPE_SPLIT)
    9883  {
    9884  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    9885  if(offset < nodeOffset + nextLevelSize)
    9886  {
    9887  node = node->split.leftChild;
    9888  }
    9889  else
    9890  {
    9891  node = node->split.leftChild->buddy;
    9892  nodeOffset += nextLevelSize;
    9893  }
    9894  ++level;
    9895  levelNodeSize = nextLevelSize;
    9896  }
    9897 
    9898  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    9899  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    9900 
    9901  ++m_FreeCount;
    9902  --m_AllocationCount;
    9903  m_SumFreeSize += alloc->GetSize();
    9904 
    9905  node->type = Node::TYPE_FREE;
    9906 
    9907  // Join free nodes if possible.
    9908  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    9909  {
    9910  RemoveFromFreeList(level, node->buddy);
    9911  Node* const parent = node->parent;
    9912 
    9913  vma_delete(GetAllocationCallbacks(), node->buddy);
    9914  vma_delete(GetAllocationCallbacks(), node);
    9915  parent->type = Node::TYPE_FREE;
    9916 
    9917  node = parent;
    9918  --level;
    9919  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    9920  --m_FreeCount;
    9921  }
    9922 
    9923  AddToFreeListFront(level, node);
    9924 }
    9925 
    9926 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9927 {
    9928  switch(node->type)
    9929  {
    9930  case Node::TYPE_FREE:
    9931  ++outInfo.unusedRangeCount;
    9932  outInfo.unusedBytes += levelNodeSize;
    9933  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9934  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9935  break;
    9936  case Node::TYPE_ALLOCATION:
    9937  {
    9938  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9939  ++outInfo.allocationCount;
    9940  outInfo.usedBytes += allocSize;
    9941  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9942  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9943 
    9944  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9945  if(unusedRangeSize > 0)
    9946  {
    9947  ++outInfo.unusedRangeCount;
    9948  outInfo.unusedBytes += unusedRangeSize;
    9949  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9950  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9951  }
    9952  }
    9953  break;
    9954  case Node::TYPE_SPLIT:
    9955  {
    9956  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9957  const Node* const leftChild = node->split.leftChild;
    9958  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9959  const Node* const rightChild = leftChild->buddy;
    9960  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9961  }
    9962  break;
    9963  default:
    9964  VMA_ASSERT(0);
    9965  }
    9966 }
    9967 
    9968 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9969 {
    9970  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9971 
    9972  // List is empty.
    9973  Node* const frontNode = m_FreeList[level].front;
    9974  if(frontNode == VMA_NULL)
    9975  {
    9976  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9977  node->free.prev = node->free.next = VMA_NULL;
    9978  m_FreeList[level].front = m_FreeList[level].back = node;
    9979  }
    9980  else
    9981  {
    9982  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9983  node->free.prev = VMA_NULL;
    9984  node->free.next = frontNode;
    9985  frontNode->free.prev = node;
    9986  m_FreeList[level].front = node;
    9987  }
    9988 }
    9989 
    9990 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9991 {
    9992  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9993 
    9994  // It is at the front.
    9995  if(node->free.prev == VMA_NULL)
    9996  {
    9997  VMA_ASSERT(m_FreeList[level].front == node);
    9998  m_FreeList[level].front = node->free.next;
    9999  }
    10000  else
    10001  {
    10002  Node* const prevFreeNode = node->free.prev;
    10003  VMA_ASSERT(prevFreeNode->free.next == node);
    10004  prevFreeNode->free.next = node->free.next;
    10005  }
    10006 
    10007  // It is at the back.
    10008  if(node->free.next == VMA_NULL)
    10009  {
    10010  VMA_ASSERT(m_FreeList[level].back == node);
    10011  m_FreeList[level].back = node->free.prev;
    10012  }
    10013  else
    10014  {
    10015  Node* const nextFreeNode = node->free.next;
    10016  VMA_ASSERT(nextFreeNode->free.prev == node);
    10017  nextFreeNode->free.prev = node->free.prev;
    10018  }
    10019 }
    10020 
    10021 #if VMA_STATS_STRING_ENABLED
    10022 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10023 {
    10024  switch(node->type)
    10025  {
    10026  case Node::TYPE_FREE:
    10027  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10028  break;
    10029  case Node::TYPE_ALLOCATION:
    10030  {
    10031  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10032  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10033  if(allocSize < levelNodeSize)
    10034  {
    10035  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10036  }
    10037  }
    10038  break;
    10039  case Node::TYPE_SPLIT:
    10040  {
    10041  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10042  const Node* const leftChild = node->split.leftChild;
    10043  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10044  const Node* const rightChild = leftChild->buddy;
    10045  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10046  }
    10047  break;
    10048  default:
    10049  VMA_ASSERT(0);
    10050  }
    10051 }
    10052 #endif // #if VMA_STATS_STRING_ENABLED
    10053 
    10054 
    10056 // class VmaDeviceMemoryBlock
    10057 
// Constructs an empty, uninitialized block; all real setup happens in Init().
// The hAllocator parameter is unused here (kept for interface symmetry -
// presumably for allocation callbacks; confirm against the class declaration).
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    10067 
    10068 void VmaDeviceMemoryBlock::Init(
    10069  VmaAllocator hAllocator,
    10070  uint32_t newMemoryTypeIndex,
    10071  VkDeviceMemory newMemory,
    10072  VkDeviceSize newSize,
    10073  uint32_t id,
    10074  uint32_t algorithm)
    10075 {
    10076  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10077 
    10078  m_MemoryTypeIndex = newMemoryTypeIndex;
    10079  m_Id = id;
    10080  m_hMemory = newMemory;
    10081 
    10082  switch(algorithm)
    10083  {
    10085  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10086  break;
    10088  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10089  break;
    10090  default:
    10091  VMA_ASSERT(0);
    10092  // Fall-through.
    10093  case 0:
    10094  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10095  }
    10096  m_pMetadata->Init(newSize);
    10097 }
    10098 
    10099 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    10100 {
    10101  // This is the most important assert in the entire library.
    10102  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    10103  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    10104 
    10105  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    10106  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    10107  m_hMemory = VK_NULL_HANDLE;
    10108 
    10109  vma_delete(allocator, m_pMetadata);
    10110  m_pMetadata = VMA_NULL;
    10111 }
    10112 
    10113 bool VmaDeviceMemoryBlock::Validate() const
    10114 {
    10115  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10116  (m_pMetadata->GetSize() != 0));
    10117 
    10118  return m_pMetadata->Validate();
    10119 }
    10120 
    10121 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10122 {
    10123  void* pData = nullptr;
    10124  VkResult res = Map(hAllocator, 1, &pData);
    10125  if(res != VK_SUCCESS)
    10126  {
    10127  return res;
    10128  }
    10129 
    10130  res = m_pMetadata->CheckCorruption(pData);
    10131 
    10132  Unmap(hAllocator, 1);
    10133 
    10134  return res;
    10135 }
    10136 
    10137 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    10138 {
    10139  if(count == 0)
    10140  {
    10141  return VK_SUCCESS;
    10142  }
    10143 
    10144  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10145  if(m_MapCount != 0)
    10146  {
    10147  m_MapCount += count;
    10148  VMA_ASSERT(m_pMappedData != VMA_NULL);
    10149  if(ppData != VMA_NULL)
    10150  {
    10151  *ppData = m_pMappedData;
    10152  }
    10153  return VK_SUCCESS;
    10154  }
    10155  else
    10156  {
    10157  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    10158  hAllocator->m_hDevice,
    10159  m_hMemory,
    10160  0, // offset
    10161  VK_WHOLE_SIZE,
    10162  0, // flags
    10163  &m_pMappedData);
    10164  if(result == VK_SUCCESS)
    10165  {
    10166  if(ppData != VMA_NULL)
    10167  {
    10168  *ppData = m_pMappedData;
    10169  }
    10170  m_MapCount = count;
    10171  }
    10172  return result;
    10173  }
    10174 }
    10175 
    10176 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10177 {
    10178  if(count == 0)
    10179  {
    10180  return;
    10181  }
    10182 
    10183  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10184  if(m_MapCount >= count)
    10185  {
    10186  m_MapCount -= count;
    10187  if(m_MapCount == 0)
    10188  {
    10189  m_pMappedData = VMA_NULL;
    10190  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10191  }
    10192  }
    10193  else
    10194  {
    10195  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10196  }
    10197 }
    10198 
// Stamps the magic marker into the debug margins immediately before and after
// the allocation, enabling later corruption detection. Requires corruption
// detection to be compiled in. Fails only if mapping the block fails.
VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Marker before the allocation (inside the leading margin) and right after it.
    VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    VmaWriteMagicValue(pData, allocOffset + allocSize);

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    10218 
// Verifies the magic markers written by WriteMagicValueAroundAllocation().
// A mismatch means a write overflowed/underflowed the allocation; it is
// reported via VMA_ASSERT, and the function still returns VK_SUCCESS -
// only a mapping failure produces an error code.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Check the marker before the allocation first; the trailing marker is
    // only checked if the leading one is intact.
    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    10244 
    10245 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10246  const VmaAllocator hAllocator,
    10247  const VmaAllocation hAllocation,
    10248  VkBuffer hBuffer)
    10249 {
    10250  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10251  hAllocation->GetBlock() == this);
    10252  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10253  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10254  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10255  hAllocator->m_hDevice,
    10256  hBuffer,
    10257  m_hMemory,
    10258  hAllocation->GetOffset());
    10259 }
    10260 
    10261 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10262  const VmaAllocator hAllocator,
    10263  const VmaAllocation hAllocation,
    10264  VkImage hImage)
    10265 {
    10266  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10267  hAllocation->GetBlock() == this);
    10268  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10269  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10270  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10271  hAllocator->m_hDevice,
    10272  hImage,
    10273  m_hMemory,
    10274  hAllocation->GetOffset());
    10275 }
    10276 
// Resets a VmaStatInfo to the identity element for VmaAddStatInfo():
// zeros everywhere, except the minimums which start at the maximum value
// so that the first VMA_MIN pulls them down.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
    10283 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    // Counters and byte totals simply accumulate.
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    // Extremes combine via min/max.
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
    10297 
// Derives the average fields from the accumulated totals, guarding against
// division by zero when there are no allocations / unused ranges.
static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
{
    inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
        VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
        VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
}
    10305 
// Constructs a custom pool: forwards the creation parameters to the pool's
// single block vector.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // blockSize == 0 means "use the allocator's preferred block size".
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10324 
// Trivial destructor - m_BlockVector destroys its blocks in its own destructor.
VmaPool_T::~VmaPool_T()
{
}
    10328 
    10329 #if VMA_STATS_STRING_ENABLED
    10330 
    10331 #endif // #if VMA_STATS_STRING_ENABLED
    10332 
// Stores the configuration for a vector of memory blocks of one memory type.
// No blocks are created here - see CreateMinBlocks() and on-demand creation.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10360 
    10361 VmaBlockVector::~VmaBlockVector()
    10362 {
    10363  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10364 
    10365  for(size_t i = m_Blocks.size(); i--; )
    10366  {
    10367  m_Blocks[i]->Destroy(m_hAllocator);
    10368  vma_delete(m_hAllocator, m_Blocks[i]);
    10369  }
    10370 }
    10371 
    10372 VkResult VmaBlockVector::CreateMinBlocks()
    10373 {
    10374  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10375  {
    10376  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10377  if(res != VK_SUCCESS)
    10378  {
    10379  return res;
    10380  }
    10381  }
    10382  return VK_SUCCESS;
    10383 }
    10384 
// Fills *pStats with aggregated statistics over all blocks in this vector.
// Thread-safe: takes the vector's mutex for the whole traversal.
void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    const size_t blockCount = m_Blocks.size();

    // Start from a zeroed result, then let every block add its share.
    pStats->size = 0;
    pStats->unusedSize = 0;
    pStats->allocationCount = 0;
    pStats->unusedRangeCount = 0;
    pStats->unusedRangeSizeMax = 0;
    pStats->blockCount = blockCount;

    for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        pBlock->m_pMetadata->AddPoolStats(*pStats);
    }
}
    10406 
    10407 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10408 {
    10409  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10410  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10411  (VMA_DEBUG_MARGIN > 0) &&
    10412  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10413 }
    10414 
// Presumably an upper bound on retry attempts per allocation (e.g. when making
// other allocations lost) - TODO confirm against its use in VmaBlockVector.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10416 
    10417 VkResult VmaBlockVector::Allocate(
    10418  VmaPool hCurrentPool,
    10419  uint32_t currentFrameIndex,
    10420  VkDeviceSize size,
    10421  VkDeviceSize alignment,
    10422  const VmaAllocationCreateInfo& createInfo,
    10423  VmaSuballocationType suballocType,
    10424  VmaAllocation* pAllocation)
    10425 {
    10426  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10427  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10428  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10429  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10430  const bool canCreateNewBlock =
    10431  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10432  (m_Blocks.size() < m_MaxBlockCount);
    10433  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10434 
    10435  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10436  // Which in turn is available only when maxBlockCount = 1.
    10437  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10438  {
    10439  canMakeOtherLost = false;
    10440  }
    10441 
    10442  // Upper address can only be used with linear allocator and within single memory block.
    10443  if(isUpperAddress &&
    10444  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10445  {
    10446  return VK_ERROR_FEATURE_NOT_PRESENT;
    10447  }
    10448 
    10449  // Validate strategy.
    10450  switch(strategy)
    10451  {
    10452  case 0:
    10454  break;
    10458  break;
    10459  default:
    10460  return VK_ERROR_FEATURE_NOT_PRESENT;
    10461  }
    10462 
    10463  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10464  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10465  {
    10466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10467  }
    10468 
    10469  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10470 
    10471  /*
    10472  Under certain condition, this whole section can be skipped for optimization, so
    10473  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10474  e.g. for custom pools with linear algorithm.
    10475  */
    10476  if(!canMakeOtherLost || canCreateNewBlock)
    10477  {
    10478  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10479  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10481 
    10482  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10483  {
    10484  // Use only last block.
    10485  if(!m_Blocks.empty())
    10486  {
    10487  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10488  VMA_ASSERT(pCurrBlock);
    10489  VkResult res = AllocateFromBlock(
    10490  pCurrBlock,
    10491  hCurrentPool,
    10492  currentFrameIndex,
    10493  size,
    10494  alignment,
    10495  allocFlagsCopy,
    10496  createInfo.pUserData,
    10497  suballocType,
    10498  strategy,
    10499  pAllocation);
    10500  if(res == VK_SUCCESS)
    10501  {
    10502  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10503  return VK_SUCCESS;
    10504  }
    10505  }
    10506  }
    10507  else
    10508  {
    10510  {
    10511  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10512  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10513  {
    10514  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10515  VMA_ASSERT(pCurrBlock);
    10516  VkResult res = AllocateFromBlock(
    10517  pCurrBlock,
    10518  hCurrentPool,
    10519  currentFrameIndex,
    10520  size,
    10521  alignment,
    10522  allocFlagsCopy,
    10523  createInfo.pUserData,
    10524  suballocType,
    10525  strategy,
    10526  pAllocation);
    10527  if(res == VK_SUCCESS)
    10528  {
    10529  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10530  return VK_SUCCESS;
    10531  }
    10532  }
    10533  }
    10534  else // WORST_FIT, FIRST_FIT
    10535  {
    10536  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10537  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10538  {
    10539  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10540  VMA_ASSERT(pCurrBlock);
    10541  VkResult res = AllocateFromBlock(
    10542  pCurrBlock,
    10543  hCurrentPool,
    10544  currentFrameIndex,
    10545  size,
    10546  alignment,
    10547  allocFlagsCopy,
    10548  createInfo.pUserData,
    10549  suballocType,
    10550  strategy,
    10551  pAllocation);
    10552  if(res == VK_SUCCESS)
    10553  {
    10554  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10555  return VK_SUCCESS;
    10556  }
    10557  }
    10558  }
    10559  }
    10560 
    10561  // 2. Try to create new block.
    10562  if(canCreateNewBlock)
    10563  {
    10564  // Calculate optimal size for new block.
    10565  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10566  uint32_t newBlockSizeShift = 0;
    10567  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10568 
    10569  if(!m_ExplicitBlockSize)
    10570  {
    10571  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10572  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10573  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10574  {
    10575  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10576  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10577  {
    10578  newBlockSize = smallerNewBlockSize;
    10579  ++newBlockSizeShift;
    10580  }
    10581  else
    10582  {
    10583  break;
    10584  }
    10585  }
    10586  }
    10587 
    10588  size_t newBlockIndex = 0;
    10589  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10590  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10591  if(!m_ExplicitBlockSize)
    10592  {
    10593  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10594  {
    10595  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10596  if(smallerNewBlockSize >= size)
    10597  {
    10598  newBlockSize = smallerNewBlockSize;
    10599  ++newBlockSizeShift;
    10600  res = CreateBlock(newBlockSize, &newBlockIndex);
    10601  }
    10602  else
    10603  {
    10604  break;
    10605  }
    10606  }
    10607  }
    10608 
    10609  if(res == VK_SUCCESS)
    10610  {
    10611  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10612  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10613 
    10614  res = AllocateFromBlock(
    10615  pBlock,
    10616  hCurrentPool,
    10617  currentFrameIndex,
    10618  size,
    10619  alignment,
    10620  allocFlagsCopy,
    10621  createInfo.pUserData,
    10622  suballocType,
    10623  strategy,
    10624  pAllocation);
    10625  if(res == VK_SUCCESS)
    10626  {
    10627  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10628  return VK_SUCCESS;
    10629  }
    10630  else
    10631  {
    10632  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10633  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10634  }
    10635  }
    10636  }
    10637  }
    10638 
    10639  // 3. Try to allocate from existing blocks with making other allocations lost.
    10640  if(canMakeOtherLost)
    10641  {
    10642  uint32_t tryIndex = 0;
    10643  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10644  {
    10645  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10646  VmaAllocationRequest bestRequest = {};
    10647  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10648 
    10649  // 1. Search existing allocations.
    10651  {
    10652  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10653  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10654  {
    10655  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10656  VMA_ASSERT(pCurrBlock);
    10657  VmaAllocationRequest currRequest = {};
    10658  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10659  currentFrameIndex,
    10660  m_FrameInUseCount,
    10661  m_BufferImageGranularity,
    10662  size,
    10663  alignment,
    10664  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10665  suballocType,
    10666  canMakeOtherLost,
    10667  strategy,
    10668  &currRequest))
    10669  {
    10670  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10671  if(pBestRequestBlock == VMA_NULL ||
    10672  currRequestCost < bestRequestCost)
    10673  {
    10674  pBestRequestBlock = pCurrBlock;
    10675  bestRequest = currRequest;
    10676  bestRequestCost = currRequestCost;
    10677 
    10678  if(bestRequestCost == 0)
    10679  {
    10680  break;
    10681  }
    10682  }
    10683  }
    10684  }
    10685  }
    10686  else // WORST_FIT, FIRST_FIT
    10687  {
    10688  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10689  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10690  {
    10691  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10692  VMA_ASSERT(pCurrBlock);
    10693  VmaAllocationRequest currRequest = {};
    10694  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10695  currentFrameIndex,
    10696  m_FrameInUseCount,
    10697  m_BufferImageGranularity,
    10698  size,
    10699  alignment,
    10700  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10701  suballocType,
    10702  canMakeOtherLost,
    10703  strategy,
    10704  &currRequest))
    10705  {
    10706  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10707  if(pBestRequestBlock == VMA_NULL ||
    10708  currRequestCost < bestRequestCost ||
    10710  {
    10711  pBestRequestBlock = pCurrBlock;
    10712  bestRequest = currRequest;
    10713  bestRequestCost = currRequestCost;
    10714 
    10715  if(bestRequestCost == 0 ||
    10717  {
    10718  break;
    10719  }
    10720  }
    10721  }
    10722  }
    10723  }
    10724 
    10725  if(pBestRequestBlock != VMA_NULL)
    10726  {
    10727  if(mapped)
    10728  {
    10729  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10730  if(res != VK_SUCCESS)
    10731  {
    10732  return res;
    10733  }
    10734  }
    10735 
    10736  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10737  currentFrameIndex,
    10738  m_FrameInUseCount,
    10739  &bestRequest))
    10740  {
    10741  // We no longer have an empty Allocation.
    10742  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10743  {
    10744  m_HasEmptyBlock = false;
    10745  }
    10746  // Allocate from this pBlock.
    10747  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10748  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10749  (*pAllocation)->InitBlockAllocation(
    10750  hCurrentPool,
    10751  pBestRequestBlock,
    10752  bestRequest.offset,
    10753  alignment,
    10754  size,
    10755  suballocType,
    10756  mapped,
    10757  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10758  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10759  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10760  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10761  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10762  {
    10763  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10764  }
    10765  if(IsCorruptionDetectionEnabled())
    10766  {
    10767  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10768  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10769  }
    10770  return VK_SUCCESS;
    10771  }
    10772  // else: Some allocations must have been touched while we are here. Next try.
    10773  }
    10774  else
    10775  {
    10776  // Could not find place in any of the blocks - break outer loop.
    10777  break;
    10778  }
    10779  }
    10780  /* Maximum number of tries exceeded - a very unlike event when many other
    10781  threads are simultaneously touching allocations making it impossible to make
    10782  lost at the same time as we try to allocate. */
    10783  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10784  {
    10785  return VK_ERROR_TOO_MANY_OBJECTS;
    10786  }
    10787  }
    10788 
    10789  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10790 }
    10791 
    10792 void VmaBlockVector::Free(
    10793  VmaAllocation hAllocation)
    10794 {
    10795  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10796 
    10797  // Scope for lock.
    10798  {
    10799  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10800 
    10801  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10802 
    10803  if(IsCorruptionDetectionEnabled())
    10804  {
    10805  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10806  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10807  }
    10808 
    10809  if(hAllocation->IsPersistentMap())
    10810  {
    10811  pBlock->Unmap(m_hAllocator, 1);
    10812  }
    10813 
    10814  pBlock->m_pMetadata->Free(hAllocation);
    10815  VMA_HEAVY_ASSERT(pBlock->Validate());
    10816 
    10817  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10818 
    10819  // pBlock became empty after this deallocation.
    10820  if(pBlock->m_pMetadata->IsEmpty())
    10821  {
    10822  // Already has empty Allocation. We don't want to have two, so delete this one.
    10823  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10824  {
    10825  pBlockToDelete = pBlock;
    10826  Remove(pBlock);
    10827  }
    10828  // We now have first empty block.
    10829  else
    10830  {
    10831  m_HasEmptyBlock = true;
    10832  }
    10833  }
    10834  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10835  // (This is optional, heuristics.)
    10836  else if(m_HasEmptyBlock)
    10837  {
    10838  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10839  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10840  {
    10841  pBlockToDelete = pLastBlock;
    10842  m_Blocks.pop_back();
    10843  m_HasEmptyBlock = false;
    10844  }
    10845  }
    10846 
    10847  IncrementallySortBlocks();
    10848  }
    10849 
    10850  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10851  // lock, for performance reason.
    10852  if(pBlockToDelete != VMA_NULL)
    10853  {
    10854  VMA_DEBUG_LOG(" Deleted empty allocation");
    10855  pBlockToDelete->Destroy(m_hAllocator);
    10856  vma_delete(m_hAllocator, pBlockToDelete);
    10857  }
    10858 }
    10859 
    10860 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10861 {
    10862  VkDeviceSize result = 0;
    10863  for(size_t i = m_Blocks.size(); i--; )
    10864  {
    10865  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10866  if(result >= m_PreferredBlockSize)
    10867  {
    10868  break;
    10869  }
    10870  }
    10871  return result;
    10872 }
    10873 
    10874 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10875 {
    10876  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10877  {
    10878  if(m_Blocks[blockIndex] == pBlock)
    10879  {
    10880  VmaVectorRemove(m_Blocks, blockIndex);
    10881  return;
    10882  }
    10883  }
    10884  VMA_ASSERT(0);
    10885 }
    10886 
    10887 void VmaBlockVector::IncrementallySortBlocks()
    10888 {
    10889  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10890  {
    10891  // Bubble sort only until first swap.
    10892  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10893  {
    10894  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10895  {
    10896  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10897  return;
    10898  }
    10899  }
    10900  }
    10901 }
    10902 
// Attempts a non-lost allocation of `size` bytes from one specific block.
// Returns VK_SUCCESS and fills *pAllocation on success, or
// VK_ERROR_OUT_OF_DEVICE_MEMORY if the block has no suitable free region.
// Must not be called with VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT set.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    // Ask the block's metadata for a region satisfying size/alignment/strategy.
    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        // canMakeOtherLost == false, so no allocations may need to be lost.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Take a persistent-map reference before committing the allocation.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        // Commit: create the handle, record the suballocation in metadata,
        // then initialize the handle with its final placement.
        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optionally fill new memory with a recognizable debug pattern.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        // Optionally write magic values in the debug margins for later checks.
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10977 
    10978 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10979 {
    10980  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10981  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10982  allocInfo.allocationSize = blockSize;
    10983  VkDeviceMemory mem = VK_NULL_HANDLE;
    10984  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10985  if(res < 0)
    10986  {
    10987  return res;
    10988  }
    10989 
    10990  // New VkDeviceMemory successfully created.
    10991 
    10992  // Create new Allocation for it.
    10993  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10994  pBlock->Init(
    10995  m_hAllocator,
    10996  m_MemoryTypeIndex,
    10997  mem,
    10998  allocInfo.allocationSize,
    10999  m_NextBlockId++,
    11000  m_Algorithm);
    11001 
    11002  m_Blocks.push_back(pBlock);
    11003  if(pNewBlockIndex != VMA_NULL)
    11004  {
    11005  *pNewBlockIndex = m_Blocks.size() - 1;
    11006  }
    11007 
    11008  return VK_SUCCESS;
    11009 }
    11010 
    11011 #if VMA_STATS_STRING_ENABLED
    11012 
// Serializes this block vector's configuration and per-block detailed maps
// into the given JSON writer. Custom pools report their full configuration;
// default pools report only the preferred block size.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max are emitted only when they constrain the pool.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // 0 means the default (generic) algorithm - omitted from output.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    11075 
    11076 #endif // #if VMA_STATS_STRING_ENABLED
    11077 
    11078 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11079  VmaAllocator hAllocator,
    11080  uint32_t currentFrameIndex)
    11081 {
    11082  if(m_pDefragmentator == VMA_NULL)
    11083  {
    11084  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11085  hAllocator,
    11086  this,
    11087  currentFrameIndex);
    11088  }
    11089 
    11090  return m_pDefragmentator;
    11091 }
    11092 
// Runs defragmentation on this block vector using its defragmentator, then
// accumulates statistics and frees blocks that became empty. maxBytesToMove
// and maxAllocationsToMove are in-out: decremented by what was consumed.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // No defragmentator was ever requested - nothing to do.
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Reduce the remaining budget for subsequent block vectors.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    // Iterate backward so VmaVectorRemove doesn't shift unvisited elements.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            // Only destroy blocks above the configured minimum count.
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Kept alive to satisfy m_MinBlockCount - remember it's empty.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11149 
    11150 void VmaBlockVector::DestroyDefragmentator()
    11151 {
    11152  if(m_pDefragmentator != VMA_NULL)
    11153  {
    11154  vma_delete(m_hAllocator, m_pDefragmentator);
    11155  m_pDefragmentator = VMA_NULL;
    11156  }
    11157 }
    11158 
    11159 void VmaBlockVector::MakePoolAllocationsLost(
    11160  uint32_t currentFrameIndex,
    11161  size_t* pLostAllocationCount)
    11162 {
    11163  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11164  size_t lostAllocationCount = 0;
    11165  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11166  {
    11167  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11168  VMA_ASSERT(pBlock);
    11169  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11170  }
    11171  if(pLostAllocationCount != VMA_NULL)
    11172  {
    11173  *pLostAllocationCount = lostAllocationCount;
    11174  }
    11175 }
    11176 
    11177 VkResult VmaBlockVector::CheckCorruption()
    11178 {
    11179  if(!IsCorruptionDetectionEnabled())
    11180  {
    11181  return VK_ERROR_FEATURE_NOT_PRESENT;
    11182  }
    11183 
    11184  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11185  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11186  {
    11187  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11188  VMA_ASSERT(pBlock);
    11189  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11190  if(res != VK_SUCCESS)
    11191  {
    11192  return res;
    11193  }
    11194  }
    11195  return VK_SUCCESS;
    11196 }
    11197 
    11198 void VmaBlockVector::AddStats(VmaStats* pStats)
    11199 {
    11200  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11201  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11202 
    11203  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11204 
    11205  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11206  {
    11207  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11208  VMA_ASSERT(pBlock);
    11209  VMA_HEAVY_ASSERT(pBlock->Validate());
    11210  VmaStatInfo allocationStatInfo;
    11211  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11212  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11213  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11214  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11215  }
    11216 }
    11217 
    11219 // VmaDefragmentator members definition
    11220 
// Constructs a defragmentator bound to one block vector. Both containers use
// the allocator's custom allocation callbacks. The defragmentator supports
// only the default (generic) algorithm - asserted below.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Linear/other custom algorithms cannot be defragmented this way.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11235 
    11236 VmaDefragmentator::~VmaDefragmentator()
    11237 {
    11238  for(size_t i = m_Blocks.size(); i--; )
    11239  {
    11240  vma_delete(m_hAllocator, m_Blocks[i]);
    11241  }
    11242 }
    11243 
    11244 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11245 {
    11246  AllocationInfo allocInfo;
    11247  allocInfo.m_hAllocation = hAlloc;
    11248  allocInfo.m_pChanged = pChanged;
    11249  m_Allocations.push_back(allocInfo);
    11250 }
    11251 
    11252 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11253 {
    11254  // It has already been mapped for defragmentation.
    11255  if(m_pMappedDataForDefragmentation)
    11256  {
    11257  *ppMappedData = m_pMappedDataForDefragmentation;
    11258  return VK_SUCCESS;
    11259  }
    11260 
    11261  // It is originally mapped.
    11262  if(m_pBlock->GetMappedData())
    11263  {
    11264  *ppMappedData = m_pBlock->GetMappedData();
    11265  return VK_SUCCESS;
    11266  }
    11267 
    11268  // Map on first usage.
    11269  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11270  *ppMappedData = m_pMappedDataForDefragmentation;
    11271  return res;
    11272 }
    11273 
    11274 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11275 {
    11276  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11277  {
    11278  m_pBlock->Unmap(hAllocator, 1);
    11279  }
    11280 }
    11281 
    11282 VkResult VmaDefragmentator::DefragmentRound(
    11283  VkDeviceSize maxBytesToMove,
    11284  uint32_t maxAllocationsToMove)
    11285 {
    11286  if(m_Blocks.empty())
    11287  {
    11288  return VK_SUCCESS;
    11289  }
    11290 
    11291  size_t srcBlockIndex = m_Blocks.size() - 1;
    11292  size_t srcAllocIndex = SIZE_MAX;
    11293  for(;;)
    11294  {
    11295  // 1. Find next allocation to move.
    11296  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    11297  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    11298  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    11299  {
    11300  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    11301  {
    11302  // Finished: no more allocations to process.
    11303  if(srcBlockIndex == 0)
    11304  {
    11305  return VK_SUCCESS;
    11306  }
    11307  else
    11308  {
    11309  --srcBlockIndex;
    11310  srcAllocIndex = SIZE_MAX;
    11311  }
    11312  }
    11313  else
    11314  {
    11315  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    11316  }
    11317  }
    11318 
    11319  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    11320  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    11321 
    11322  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    11323  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    11324  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    11325  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    11326 
    11327  // 2. Try to find new place for this allocation in preceding or current block.
    11328  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    11329  {
    11330  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    11331  VmaAllocationRequest dstAllocRequest;
    11332  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    11333  m_CurrentFrameIndex,
    11334  m_pBlockVector->GetFrameInUseCount(),
    11335  m_pBlockVector->GetBufferImageGranularity(),
    11336  size,
    11337  alignment,
    11338  false, // upperAddress
    11339  suballocType,
    11340  false, // canMakeOtherLost
    11342  &dstAllocRequest) &&
    11343  MoveMakesSense(
    11344  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    11345  {
    11346  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    11347 
    11348  // Reached limit on number of allocations or bytes to move.
    11349  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    11350  (m_BytesMoved + size > maxBytesToMove))
    11351  {
    11352  return VK_INCOMPLETE;
    11353  }
    11354 
    11355  void* pDstMappedData = VMA_NULL;
    11356  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    11357  if(res != VK_SUCCESS)
    11358  {
    11359  return res;
    11360  }
    11361 
    11362  void* pSrcMappedData = VMA_NULL;
    11363  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    11364  if(res != VK_SUCCESS)
    11365  {
    11366  return res;
    11367  }
    11368 
    11369  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    11370  memcpy(
    11371  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    11372  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    11373  static_cast<size_t>(size));
    11374 
    11375  if(VMA_DEBUG_MARGIN > 0)
    11376  {
    11377  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
    11378  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
    11379  }
    11380 
    11381  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    11382  dstAllocRequest,
    11383  suballocType,
    11384  size,
    11385  false, // upperAddress
    11386  allocInfo.m_hAllocation);
    11387  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    11388 
    11389  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    11390 
    11391  if(allocInfo.m_pChanged != VMA_NULL)
    11392  {
    11393  *allocInfo.m_pChanged = VK_TRUE;
    11394  }
    11395 
    11396  ++m_AllocationsMoved;
    11397  m_BytesMoved += size;
    11398 
    11399  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    11400 
    11401  break;
    11402  }
    11403  }
    11404 
    11405  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    11406 
    11407  if(srcAllocIndex > 0)
    11408  {
    11409  --srcAllocIndex;
    11410  }
    11411  else
    11412  {
    11413  if(srcBlockIndex > 0)
    11414  {
    11415  --srcBlockIndex;
    11416  srcAllocIndex = SIZE_MAX;
    11417  }
    11418  else
    11419  {
    11420  return VK_SUCCESS;
    11421  }
    11422  }
    11423  }
    11424 }
    11425 
    11426 VkResult VmaDefragmentator::Defragment(
    11427  VkDeviceSize maxBytesToMove,
    11428  uint32_t maxAllocationsToMove)
    11429 {
    11430  if(m_Allocations.empty())
    11431  {
    11432  return VK_SUCCESS;
    11433  }
    11434 
    11435  // Create block info for each block.
    11436  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    11437  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11438  {
    11439  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    11440  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    11441  m_Blocks.push_back(pBlockInfo);
    11442  }
    11443 
    11444  // Sort them by m_pBlock pointer value.
    11445  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    11446 
    11447  // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    11448  for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    11449  {
    11450  AllocationInfo& allocInfo = m_Allocations[blockIndex];
    11451  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    11452  if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    11453  {
    11454  VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
    11455  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    11456  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    11457  {
    11458  (*it)->m_Allocations.push_back(allocInfo);
    11459  }
    11460  else
    11461  {
    11462  VMA_ASSERT(0);
    11463  }
    11464  }
    11465  }
    11466  m_Allocations.clear();
    11467 
    11468  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11469  {
    11470  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    11471  pBlockInfo->CalcHasNonMovableAllocations();
    11472  pBlockInfo->SortAllocationsBySizeDescecnding();
    11473  }
    11474 
    11475  // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    11476  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    11477 
    11478  // Execute defragmentation rounds (the main part).
    11479  VkResult result = VK_SUCCESS;
    11480  for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    11481  {
    11482  result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    11483  }
    11484 
    11485  // Unmap blocks that were mapped for defragmentation.
    11486  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11487  {
    11488  m_Blocks[blockIndex]->Unmap(m_hAllocator);
    11489  }
    11490 
    11491  return result;
    11492 }
    11493 
    11494 bool VmaDefragmentator::MoveMakesSense(
    11495  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11496  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11497 {
    11498  if(dstBlockIndex < srcBlockIndex)
    11499  {
    11500  return true;
    11501  }
    11502  if(dstBlockIndex > srcBlockIndex)
    11503  {
    11504  return false;
    11505  }
    11506  if(dstOffset < srcOffset)
    11507  {
    11508  return true;
    11509  }
    11510  return false;
    11511 }
    11512 
    11514 // VmaRecorder
    11515 
    11516 #if VMA_RECORDING_ENABLED
    11517 
// Constructs an inactive recorder. Real initialization (opening the output
// file, querying the performance counter) happens in Init().
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11526 
    11527 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    11528 {
    11529  m_UseMutex = useMutex;
    11530  m_Flags = settings.flags;
    11531 
    11532  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    11533  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    11534 
    11535  // Open file for writing.
    11536  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    11537  if(err != 0)
    11538  {
    11539  return VK_ERROR_INITIALIZATION_FAILED;
    11540  }
    11541 
    11542  // Write header.
    11543  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    11544  fprintf(m_File, "%s\n", "1,4");
    11545 
    11546  return VK_SUCCESS;
    11547 }
    11548 
// Closes the recording file if Init() successfully opened it.
VmaRecorder::~VmaRecorder()
{
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
    11556 
    11557 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11558 {
    11559  CallParams callParams;
    11560  GetBasicParams(callParams);
    11561 
    11562  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11563  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11564  Flush();
    11565 }
    11566 
    11567 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11568 {
    11569  CallParams callParams;
    11570  GetBasicParams(callParams);
    11571 
    11572  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11573  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11574  Flush();
    11575 }
    11576 
// Records a vmaCreatePool call as one CSV line:
// threadId,time,frameIndex,vmaCreatePool,<pool-create-info fields>,<pool handle>.
// Argument order must match the recording format parsed by the playback tool.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11593 
    11594 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11595 {
    11596  CallParams callParams;
    11597  GetBasicParams(callParams);
    11598 
    11599  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11600  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11601  pool);
    11602  Flush();
    11603 }
    11604 
// Records a vmaAllocateMemory call as one CSV line: memory requirements,
// allocation-create-info fields, the resulting allocation handle, and the
// user-data string. Argument order must match the recording format.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11629 
// Records a vmaAllocateMemoryForBuffer call as one CSV line. Same layout as
// vmaAllocateMemory plus the two dedicated-allocation hint flags (as 0/1).
// Argument order must match the recording format.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11658 
// Records a vmaAllocateMemoryForImage call as one CSV line. Same layout as
// vmaAllocateMemoryForBuffer. Argument order must match the recording format.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11687 
    11688 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11689  VmaAllocation allocation)
    11690 {
    11691  CallParams callParams;
    11692  GetBasicParams(callParams);
    11693 
    11694  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11695  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11696  allocation);
    11697  Flush();
    11698 }
    11699 
    11700 void VmaRecorder::RecordResizeAllocation(
    11701  uint32_t frameIndex,
    11702  VmaAllocation allocation,
    11703  VkDeviceSize newSize)
    11704 {
    11705  CallParams callParams;
    11706  GetBasicParams(callParams);
    11707 
    11708  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11709  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11710  allocation, newSize);
    11711  Flush();
    11712 }
    11713 
    11714 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11715  VmaAllocation allocation,
    11716  const void* pUserData)
    11717 {
    11718  CallParams callParams;
    11719  GetBasicParams(callParams);
    11720 
    11721  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11722  UserDataString userDataStr(
    11723  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11724  pUserData);
    11725  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11726  allocation,
    11727  userDataStr.GetString());
    11728  Flush();
    11729 }
    11730 
    11731 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11732  VmaAllocation allocation)
    11733 {
    11734  CallParams callParams;
    11735  GetBasicParams(callParams);
    11736 
    11737  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11738  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11739  allocation);
    11740  Flush();
    11741 }
    11742 
    11743 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11744  VmaAllocation allocation)
    11745 {
    11746  CallParams callParams;
    11747  GetBasicParams(callParams);
    11748 
    11749  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11750  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11751  allocation);
    11752  Flush();
    11753 }
    11754 
    11755 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11756  VmaAllocation allocation)
    11757 {
    11758  CallParams callParams;
    11759  GetBasicParams(callParams);
    11760 
    11761  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11762  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11763  allocation);
    11764  Flush();
    11765 }
    11766 
    11767 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11768  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11769 {
    11770  CallParams callParams;
    11771  GetBasicParams(callParams);
    11772 
    11773  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11774  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11775  allocation,
    11776  offset,
    11777  size);
    11778  Flush();
    11779 }
    11780 
    11781 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11782  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11783 {
    11784  CallParams callParams;
    11785  GetBasicParams(callParams);
    11786 
    11787  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11788  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11789  allocation,
    11790  offset,
    11791  size);
    11792  Flush();
    11793 }
    11794 
// Records a vmaCreateBuffer call as one CSV line: buffer-create-info fields,
// allocation-create-info fields, the allocation handle, and the user-data
// string. Argument order must match the recording format.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11820 
// Records a vmaCreateImage call as one CSV line: image-create-info fields,
// allocation-create-info fields, the allocation handle, and the user-data
// string. Argument order must match the recording format.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11855 
    11856 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11857  VmaAllocation allocation)
    11858 {
    11859  CallParams callParams;
    11860  GetBasicParams(callParams);
    11861 
    11862  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11863  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11864  allocation);
    11865  Flush();
    11866 }
    11867 
    11868 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11869  VmaAllocation allocation)
    11870 {
    11871  CallParams callParams;
    11872  GetBasicParams(callParams);
    11873 
    11874  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11875  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11876  allocation);
    11877  Flush();
    11878 }
    11879 
    11880 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11881  VmaAllocation allocation)
    11882 {
    11883  CallParams callParams;
    11884  GetBasicParams(callParams);
    11885 
    11886  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11887  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11888  allocation);
    11889  Flush();
    11890 }
    11891 
    11892 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11893  VmaAllocation allocation)
    11894 {
    11895  CallParams callParams;
    11896  GetBasicParams(callParams);
    11897 
    11898  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11899  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11900  allocation);
    11901  Flush();
    11902 }
    11903 
    11904 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11905  VmaPool pool)
    11906 {
    11907  CallParams callParams;
    11908  GetBasicParams(callParams);
    11909 
    11910  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11911  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11912  pool);
    11913  Flush();
    11914 }
    11915 
    11916 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11917 {
    11918  if(pUserData != VMA_NULL)
    11919  {
    11920  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11921  {
    11922  m_Str = (const char*)pUserData;
    11923  }
    11924  else
    11925  {
    11926  sprintf_s(m_PtrStr, "%p", pUserData);
    11927  m_Str = m_PtrStr;
    11928  }
    11929  }
    11930  else
    11931  {
    11932  m_Str = "";
    11933  }
    11934 }
    11935 
// Writes the "Config" section at the top of the recording: physical-device
// identity and limits, memory heaps/types, enabled extensions, and the VMA
// compile-time macro values. The exact line order is part of the recording
// format consumed by the playback tool - do not reorder.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11981 
    11982 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11983 {
    11984  outParams.threadId = GetCurrentThreadId();
    11985 
    11986  LARGE_INTEGER counter;
    11987  QueryPerformanceCounter(&counter);
    11988  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11989 }
    11990 
    11991 void VmaRecorder::Flush()
    11992 {
    11993  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11994  {
    11995  fflush(m_File);
    11996  }
    11997 }
    11998 
    11999 #endif // #if VMA_RECORDING_ENABLED
    12000 
    12002 // VmaAllocator_T
    12003 
    12004 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12005  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12006  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12007  m_hDevice(pCreateInfo->device),
    12008  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12009  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12010  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12011  m_PreferredLargeHeapBlockSize(0),
    12012  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12013  m_CurrentFrameIndex(0),
    12014  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12015  m_NextPoolId(0)
    12017  ,m_pRecorder(VMA_NULL)
    12018 #endif
    12019 {
    12020  if(VMA_DEBUG_DETECT_CORRUPTION)
    12021  {
    12022  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12023  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12024  }
    12025 
    12026  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12027 
    12028 #if !(VMA_DEDICATED_ALLOCATION)
    12030  {
    12031  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12032  }
    12033 #endif
    12034 
    12035  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12036  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12037  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12038 
    12039  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12040  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12041 
    12042  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12043  {
    12044  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12045  }
    12046 
    12047  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12048  {
    12049  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12050  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12051  }
    12052 
    12053  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12054 
    12055  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12056  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12057 
    12058  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12059  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12060  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12061  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12062 
    12063  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12064  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12065 
    12066  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12067  {
    12068  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12069  {
    12070  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12071  if(limit != VK_WHOLE_SIZE)
    12072  {
    12073  m_HeapSizeLimit[heapIndex] = limit;
    12074  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12075  {
    12076  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12077  }
    12078  }
    12079  }
    12080  }
    12081 
    12082  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12083  {
    12084  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12085 
    12086  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12087  this,
    12088  memTypeIndex,
    12089  preferredBlockSize,
    12090  0,
    12091  SIZE_MAX,
    12092  GetBufferImageGranularity(),
    12093  pCreateInfo->frameInUseCount,
    12094  false, // isCustomPool
    12095  false, // explicitBlockSize
    12096  false); // linearAlgorithm
    12097  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12098  // becase minBlockCount is 0.
    12099  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12100 
    12101  }
    12102 }
    12103 
    12104 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    12105 {
    12106  VkResult res = VK_SUCCESS;
    12107 
    12108  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    12109  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    12110  {
    12111 #if VMA_RECORDING_ENABLED
    12112  m_pRecorder = vma_new(this, VmaRecorder)();
    12113  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    12114  if(res != VK_SUCCESS)
    12115  {
    12116  return res;
    12117  }
    12118  m_pRecorder->WriteConfiguration(
    12119  m_PhysicalDeviceProperties,
    12120  m_MemProps,
    12121  m_UseKhrDedicatedAllocation);
    12122  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    12123 #else
    12124  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    12125  return VK_ERROR_FEATURE_NOT_PRESENT;
    12126 #endif
    12127  }
    12128 
    12129  return res;
    12130 }
    12131 
// Destroys the allocator: records the destruction and deletes the recorder
// (if recording was active), then frees the per-memory-type structures.
// All custom pools must already be destroyed by the user at this point.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    // Delete in reverse index order.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
    12150 
// Fills m_VulkanFunctions: starts from statically linked entry points when
// VMA_STATIC_VULKAN_FUNCTIONS == 1, then overrides with any non-null pointers
// the user supplied, and finally asserts that every required function is set.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // Extension entry points must be fetched via vkGetDeviceProcAddr.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    // User-provided pointers take precedence over the static ones.
    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12236 
    12237 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12238 {
    12239  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12240  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12241  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12242  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12243 }
    12244 
    12245 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12246  VkDeviceSize size,
    12247  VkDeviceSize alignment,
    12248  bool dedicatedAllocation,
    12249  VkBuffer dedicatedBuffer,
    12250  VkImage dedicatedImage,
    12251  const VmaAllocationCreateInfo& createInfo,
    12252  uint32_t memTypeIndex,
    12253  VmaSuballocationType suballocType,
    12254  VmaAllocation* pAllocation)
    12255 {
    12256  VMA_ASSERT(pAllocation != VMA_NULL);
    12257  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12258 
    12259  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12260 
    12261  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12262  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12263  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12264  {
    12265  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12266  }
    12267 
    12268  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12269  VMA_ASSERT(blockVector);
    12270 
    12271  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12272  bool preferDedicatedMemory =
    12273  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12274  dedicatedAllocation ||
    12275  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12276  size > preferredBlockSize / 2;
    12277 
    12278  if(preferDedicatedMemory &&
    12279  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12280  finalCreateInfo.pool == VK_NULL_HANDLE)
    12281  {
    12283  }
    12284 
    12285  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12286  {
    12287  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12288  {
    12289  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12290  }
    12291  else
    12292  {
    12293  return AllocateDedicatedMemory(
    12294  size,
    12295  suballocType,
    12296  memTypeIndex,
    12297  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12298  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12299  finalCreateInfo.pUserData,
    12300  dedicatedBuffer,
    12301  dedicatedImage,
    12302  pAllocation);
    12303  }
    12304  }
    12305  else
    12306  {
    12307  VkResult res = blockVector->Allocate(
    12308  VK_NULL_HANDLE, // hCurrentPool
    12309  m_CurrentFrameIndex.load(),
    12310  size,
    12311  alignment,
    12312  finalCreateInfo,
    12313  suballocType,
    12314  pAllocation);
    12315  if(res == VK_SUCCESS)
    12316  {
    12317  return res;
    12318  }
    12319 
    12320  // 5. Try dedicated memory.
    12321  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12322  {
    12323  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12324  }
    12325  else
    12326  {
    12327  res = AllocateDedicatedMemory(
    12328  size,
    12329  suballocType,
    12330  memTypeIndex,
    12331  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12332  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12333  finalCreateInfo.pUserData,
    12334  dedicatedBuffer,
    12335  dedicatedImage,
    12336  pAllocation);
    12337  if(res == VK_SUCCESS)
    12338  {
    12339  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12340  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12341  return VK_SUCCESS;
    12342  }
    12343  else
    12344  {
    12345  // Everything failed: Return error code.
    12346  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12347  return res;
    12348  }
    12349  }
    12350  }
    12351 }
    12352 
/*
Creates a dedicated allocation: one VkDeviceMemory object used exclusively by
a single resource, bypassing the block/sub-allocation machinery.

map              - if true, the memory is persistently mapped before returning.
dedicatedBuffer/
dedicatedImage   - at most one may be non-null; when VK_KHR_dedicated_allocation
                   is in use it is chained into the allocate info so the driver
                   can specialize the memory for that resource.
pAllocation      - receives the new allocation on success.

On any failure the VkDeviceMemory is freed before returning, so no resource
leaks out of this function.
*/
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain VkMemoryDedicatedAllocateInfoKHR only when the extension is active
    // and the caller actually supplied the resource this memory is for.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            // Buffer and image are mutually exclusive per the Vulkan spec.
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Optionally map the whole range persistently.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the just-allocated memory before bailing.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    // Wrap the raw memory in a VmaAllocation object.
    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Debug feature: fill fresh memory with a recognizable bit pattern.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12435 
/*
Queries memory requirements for a buffer.

When VK_KHR_dedicated_allocation is enabled, uses the *2KHR query so the
driver can also report whether a dedicated allocation is required or merely
preferred for this buffer; otherwise falls back to the core query and reports
both flags as false.
*/
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        // Chained output struct that receives the dedicated-allocation hints.
        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        // Normalize VkBool32 to bool.
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
    12467 
/*
Queries memory requirements for an image.

Mirror of GetBufferMemoryRequirements: uses the *2KHR query when
VK_KHR_dedicated_allocation is enabled to obtain the dedicated-allocation
hints; otherwise uses the core query and reports both flags as false.
*/
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        // Chained output struct that receives the dedicated-allocation hints.
        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        // Normalize VkBool32 to bool.
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
    12499 
    12500 VkResult VmaAllocator_T::AllocateMemory(
    12501  const VkMemoryRequirements& vkMemReq,
    12502  bool requiresDedicatedAllocation,
    12503  bool prefersDedicatedAllocation,
    12504  VkBuffer dedicatedBuffer,
    12505  VkImage dedicatedImage,
    12506  const VmaAllocationCreateInfo& createInfo,
    12507  VmaSuballocationType suballocType,
    12508  VmaAllocation* pAllocation)
    12509 {
    12510  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12511 
    12512  if(vkMemReq.size == 0)
    12513  {
    12514  return VK_ERROR_VALIDATION_FAILED_EXT;
    12515  }
    12516  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12517  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12518  {
    12519  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12520  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12521  }
    12522  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12524  {
    12525  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12526  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12527  }
    12528  if(requiresDedicatedAllocation)
    12529  {
    12530  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12531  {
    12532  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12533  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12534  }
    12535  if(createInfo.pool != VK_NULL_HANDLE)
    12536  {
    12537  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12538  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12539  }
    12540  }
    12541  if((createInfo.pool != VK_NULL_HANDLE) &&
    12542  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12543  {
    12544  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12545  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12546  }
    12547 
    12548  if(createInfo.pool != VK_NULL_HANDLE)
    12549  {
    12550  const VkDeviceSize alignmentForPool = VMA_MAX(
    12551  vkMemReq.alignment,
    12552  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12553  return createInfo.pool->m_BlockVector.Allocate(
    12554  createInfo.pool,
    12555  m_CurrentFrameIndex.load(),
    12556  vkMemReq.size,
    12557  alignmentForPool,
    12558  createInfo,
    12559  suballocType,
    12560  pAllocation);
    12561  }
    12562  else
    12563  {
    12564  // Bit mask of memory Vulkan types acceptable for this allocation.
    12565  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12566  uint32_t memTypeIndex = UINT32_MAX;
    12567  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12568  if(res == VK_SUCCESS)
    12569  {
    12570  VkDeviceSize alignmentForMemType = VMA_MAX(
    12571  vkMemReq.alignment,
    12572  GetMemoryTypeMinAlignment(memTypeIndex));
    12573 
    12574  res = AllocateMemoryOfType(
    12575  vkMemReq.size,
    12576  alignmentForMemType,
    12577  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12578  dedicatedBuffer,
    12579  dedicatedImage,
    12580  createInfo,
    12581  memTypeIndex,
    12582  suballocType,
    12583  pAllocation);
    12584  // Succeeded on first try.
    12585  if(res == VK_SUCCESS)
    12586  {
    12587  return res;
    12588  }
    12589  // Allocation from this memory type failed. Try other compatible memory types.
    12590  else
    12591  {
    12592  for(;;)
    12593  {
    12594  // Remove old memTypeIndex from list of possibilities.
    12595  memoryTypeBits &= ~(1u << memTypeIndex);
    12596  // Find alternative memTypeIndex.
    12597  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12598  if(res == VK_SUCCESS)
    12599  {
    12600  alignmentForMemType = VMA_MAX(
    12601  vkMemReq.alignment,
    12602  GetMemoryTypeMinAlignment(memTypeIndex));
    12603 
    12604  res = AllocateMemoryOfType(
    12605  vkMemReq.size,
    12606  alignmentForMemType,
    12607  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12608  dedicatedBuffer,
    12609  dedicatedImage,
    12610  createInfo,
    12611  memTypeIndex,
    12612  suballocType,
    12613  pAllocation);
    12614  // Allocation from this alternative memory type succeeded.
    12615  if(res == VK_SUCCESS)
    12616  {
    12617  return res;
    12618  }
    12619  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12620  }
    12621  // No other matching memory type index could be found.
    12622  else
    12623  {
    12624  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12625  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12626  }
    12627  }
    12628  }
    12629  }
    12630  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12631  else
    12632  return res;
    12633  }
    12634 }
    12635 
    12636 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12637 {
    12638  VMA_ASSERT(allocation);
    12639 
    12640  if(TouchAllocation(allocation))
    12641  {
    12642  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12643  {
    12644  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12645  }
    12646 
    12647  switch(allocation->GetType())
    12648  {
    12649  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12650  {
    12651  VmaBlockVector* pBlockVector = VMA_NULL;
    12652  VmaPool hPool = allocation->GetPool();
    12653  if(hPool != VK_NULL_HANDLE)
    12654  {
    12655  pBlockVector = &hPool->m_BlockVector;
    12656  }
    12657  else
    12658  {
    12659  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12660  pBlockVector = m_pBlockVectors[memTypeIndex];
    12661  }
    12662  pBlockVector->Free(allocation);
    12663  }
    12664  break;
    12665  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12666  FreeDedicatedMemory(allocation);
    12667  break;
    12668  default:
    12669  VMA_ASSERT(0);
    12670  }
    12671  }
    12672 
    12673  allocation->SetUserData(this, VMA_NULL);
    12674  vma_delete(this, allocation);
    12675 }
    12676 
    12677 VkResult VmaAllocator_T::ResizeAllocation(
    12678  const VmaAllocation alloc,
    12679  VkDeviceSize newSize)
    12680 {
    12681  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12682  {
    12683  return VK_ERROR_VALIDATION_FAILED_EXT;
    12684  }
    12685  if(newSize == alloc->GetSize())
    12686  {
    12687  return VK_SUCCESS;
    12688  }
    12689 
    12690  switch(alloc->GetType())
    12691  {
    12692  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12693  return VK_ERROR_FEATURE_NOT_PRESENT;
    12694  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12695  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12696  {
    12697  alloc->ChangeSize(newSize);
    12698  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12699  return VK_SUCCESS;
    12700  }
    12701  else
    12702  {
    12703  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12704  }
    12705  default:
    12706  VMA_ASSERT(0);
    12707  return VK_ERROR_VALIDATION_FAILED_EXT;
    12708  }
    12709 }
    12710 
    12711 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12712 {
    12713  // Initialize.
    12714  InitStatInfo(pStats->total);
    12715  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12716  InitStatInfo(pStats->memoryType[i]);
    12717  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12718  InitStatInfo(pStats->memoryHeap[i]);
    12719 
    12720  // Process default pools.
    12721  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12722  {
    12723  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12724  VMA_ASSERT(pBlockVector);
    12725  pBlockVector->AddStats(pStats);
    12726  }
    12727 
    12728  // Process custom pools.
    12729  {
    12730  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12731  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12732  {
    12733  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12734  }
    12735  }
    12736 
    12737  // Process dedicated allocations.
    12738  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12739  {
    12740  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12741  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12742  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12743  VMA_ASSERT(pDedicatedAllocVector);
    12744  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12745  {
    12746  VmaStatInfo allocationStatInfo;
    12747  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12748  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12749  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12750  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12751  }
    12752  }
    12753 
    12754  // Postprocess.
    12755  VmaPostprocessCalcStatInfo(pStats->total);
    12756  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12757  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12758  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12759  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12760 }
    12761 
// PCI vendor ID of Advanced Micro Devices, Inc. (4098 == 0x1002), as reported
// in VkPhysicalDeviceProperties::vendorID.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12763 
/*
Defragments the given allocations by moving them to reduce fragmentation.

Phases:
1. Dispatch each eligible allocation to the defragmentator of its owning
   block vector (created on demand). Only block allocations in
   HOST_VISIBLE | HOST_COHERENT memory that are not lost qualify; custom
   pools using a linear or buddy algorithm are skipped.
2. Run defragmentation on each default block vector and each custom pool,
   bounded by maxBytesToMove / maxAllocationsToMove, stopping at the first
   error.
3. Destroy all defragmentators (in reverse order of creation).

pAllocationsChanged, if provided, receives one VkBool32 per input allocation
telling whether it was moved. Holds m_PoolsMutex for the entire operation.
*/
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // NOTE(review): SIZE_MAX assigned to a VkDeviceSize (uint64_t) caps the
    // limit at 4 GiB on 32-bit builds; presumably "unlimited" was intended -
    // confirm whether VK_WHOLE_SIZE/UINT64_MAX would be more appropriate.
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12884 
/*
Fills *pAllocationInfo with the current state of hAllocation and, as a side
effect, marks the allocation as used in the current frame (same CAS loop as
TouchAllocation). For a lost allocation, memoryType/deviceMemory/offset are
reported as sentinel values.
*/
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        // Lock-free loop: repeatedly try to bump the allocation's last-use
        // frame index to the current frame via compare-exchange, re-reading
        // state after every failed attempt.
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation was lost: report sentinel values (size and user
                // data remain valid).
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: state is stable, report it.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats builds still record the touch so usage shows up in dumps,
        // even though a non-lost-capable allocation cannot be lost.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12956 
/*
Marks hAllocation as used in the current frame.

Returns true if the allocation is still valid, false if it has been lost.
Uses the same lock-free compare-exchange loop as GetAllocationInfo to advance
the allocation's last-use frame index to the current frame.
*/
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Already lost: the caller must not use this allocation.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Successfully stamped with the current frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the frame index; on CAS failure the loop
                // retries with the freshly observed value.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats builds record the touch even for allocations that can never
        // be lost, so usage is visible in statistics dumps.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    13008 
    13009 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13010 {
    13011  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13012 
    13013  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13014 
    13015  if(newCreateInfo.maxBlockCount == 0)
    13016  {
    13017  newCreateInfo.maxBlockCount = SIZE_MAX;
    13018  }
    13019  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13020  {
    13021  return VK_ERROR_INITIALIZATION_FAILED;
    13022  }
    13023 
    13024  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13025 
    13026  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13027 
    13028  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13029  if(res != VK_SUCCESS)
    13030  {
    13031  vma_delete(this, *pPool);
    13032  *pPool = VMA_NULL;
    13033  return res;
    13034  }
    13035 
    13036  // Add to m_Pools.
    13037  {
    13038  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13039  (*pPool)->SetId(m_NextPoolId++);
    13040  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13041  }
    13042 
    13043  return VK_SUCCESS;
    13044 }
    13045 
    13046 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13047 {
    13048  // Remove from m_Pools.
    13049  {
    13050  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13051  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13052  VMA_ASSERT(success && "Pool not found in Allocator.");
    13053  }
    13054 
    13055  vma_delete(this, pool);
    13056 }
    13057 
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    // Delegates to the pool's block vector, which owns the per-pool statistics.
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13062 
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    // Atomic store: the frame index is read concurrently by the
    // touch/lost-allocation logic (see TouchAllocation above).
    m_CurrentFrameIndex.store(frameIndex);
}
    13067 
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    // Marks eligible allocations in the pool as lost, relative to the
    // current frame index. pLostAllocationCount (if provided by the block
    // vector's contract) receives the number of allocations affected.
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    13076 
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    // Validates the pool's memory via its block vector. May return
    // VK_ERROR_FEATURE_NOT_PRESENT when validation is unavailable
    // (see how CheckCorruption below treats that value).
    return hPool->m_BlockVector.CheckCorruption();
}
    13081 
    13082 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13083 {
    13084  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13085 
    13086  // Process default pools.
    13087  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13088  {
    13089  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13090  {
    13091  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13092  VMA_ASSERT(pBlockVector);
    13093  VkResult localRes = pBlockVector->CheckCorruption();
    13094  switch(localRes)
    13095  {
    13096  case VK_ERROR_FEATURE_NOT_PRESENT:
    13097  break;
    13098  case VK_SUCCESS:
    13099  finalRes = VK_SUCCESS;
    13100  break;
    13101  default:
    13102  return localRes;
    13103  }
    13104  }
    13105  }
    13106 
    13107  // Process custom pools.
    13108  {
    13109  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13110  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13111  {
    13112  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13113  {
    13114  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13115  switch(localRes)
    13116  {
    13117  case VK_ERROR_FEATURE_NOT_PRESENT:
    13118  break;
    13119  case VK_SUCCESS:
    13120  finalRes = VK_SUCCESS;
    13121  break;
    13122  default:
    13123  return localRes;
    13124  }
    13125  }
    13126  }
    13127  }
    13128 
    13129  return finalRes;
    13130 }
    13131 
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    // Creates a dummy allocation that starts out - and permanently stays - in
    // the "lost" state (frame index VMA_FRAME_INDEX_LOST).
    // NOTE(review): second ctor argument is presumably userDataString=false - confirm.
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    13137 
    13138 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    13139 {
    13140  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    13141 
    13142  VkResult res;
    13143  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13144  {
    13145  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13146  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    13147  {
    13148  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13149  if(res == VK_SUCCESS)
    13150  {
    13151  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    13152  }
    13153  }
    13154  else
    13155  {
    13156  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    13157  }
    13158  }
    13159  else
    13160  {
    13161  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13162  }
    13163 
    13164  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    13165  {
    13166  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    13167  }
    13168 
    13169  return res;
    13170 }
    13171 
    13172 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    13173 {
    13174  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    13175  {
    13176  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    13177  }
    13178 
    13179  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    13180 
    13181  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    13182  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13183  {
    13184  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13185  m_HeapSizeLimit[heapIndex] += size;
    13186  }
    13187 }
    13188 
    13189 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    13190 {
    13191  if(hAllocation->CanBecomeLost())
    13192  {
    13193  return VK_ERROR_MEMORY_MAP_FAILED;
    13194  }
    13195 
    13196  switch(hAllocation->GetType())
    13197  {
    13198  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13199  {
    13200  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13201  char *pBytes = VMA_NULL;
    13202  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    13203  if(res == VK_SUCCESS)
    13204  {
    13205  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    13206  hAllocation->BlockAllocMap();
    13207  }
    13208  return res;
    13209  }
    13210  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13211  return hAllocation->DedicatedAllocMap(this, ppData);
    13212  default:
    13213  VMA_ASSERT(0);
    13214  return VK_ERROR_MEMORY_MAP_FAILED;
    13215  }
    13216 }
    13217 
    13218 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13219 {
    13220  switch(hAllocation->GetType())
    13221  {
    13222  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13223  {
    13224  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13225  hAllocation->BlockAllocUnmap();
    13226  pBlock->Unmap(this, 1);
    13227  }
    13228  break;
    13229  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13230  hAllocation->DedicatedAllocUnmap(this);
    13231  break;
    13232  default:
    13233  VMA_ASSERT(0);
    13234  }
    13235 }
    13236 
    13237 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13238 {
    13239  VkResult res = VK_SUCCESS;
    13240  switch(hAllocation->GetType())
    13241  {
    13242  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13243  res = GetVulkanFunctions().vkBindBufferMemory(
    13244  m_hDevice,
    13245  hBuffer,
    13246  hAllocation->GetMemory(),
    13247  0); //memoryOffset
    13248  break;
    13249  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13250  {
    13251  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13252  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13253  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13254  break;
    13255  }
    13256  default:
    13257  VMA_ASSERT(0);
    13258  }
    13259  return res;
    13260 }
    13261 
    13262 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13263 {
    13264  VkResult res = VK_SUCCESS;
    13265  switch(hAllocation->GetType())
    13266  {
    13267  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13268  res = GetVulkanFunctions().vkBindImageMemory(
    13269  m_hDevice,
    13270  hImage,
    13271  hAllocation->GetMemory(),
    13272  0); //memoryOffset
    13273  break;
    13274  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13275  {
    13276  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13277  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13278  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13279  break;
    13280  }
    13281  default:
    13282  VMA_ASSERT(0);
    13283  }
    13284  return res;
    13285 }
    13286 
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    // Flushes or invalidates the mapped-memory range covering [offset, offset+size)
    // of the given allocation. No-op for coherent memory types or size == 0.
    // size == VK_WHOLE_SIZE means "to the end of the allocation".
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        // Vulkan requires VkMappedMemoryRange offset/size to be multiples of
        // nonCoherentAtomSize, so the requested range is expanded outward.
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align down the start, then clamp the aligned-up size to the allocation end.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // The sub-allocation offset is itself atom-aligned, so translating the
            // range into block coordinates preserves the alignment.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13362 
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    // Unregisters a dedicated allocation and releases its VkDeviceMemory.
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Remove from the per-memory-type list of dedicated allocations.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    13392 
    13393 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13394 {
    13395  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13396  !hAllocation->CanBecomeLost() &&
    13397  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13398  {
    13399  void* pData = VMA_NULL;
    13400  VkResult res = Map(hAllocation, &pData);
    13401  if(res == VK_SUCCESS)
    13402  {
    13403  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13404  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13405  Unmap(hAllocation);
    13406  }
    13407  else
    13408  {
    13409  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13410  }
    13411  }
    13412 }
    13413 
    13414 #if VMA_STATS_STRING_ENABLED
    13415 
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Writes detailed per-allocation information as three optional JSON sections:
    // "DedicatedAllocations", "DefaultPools", and "Pools" (custom pools).
    // Each section is opened lazily, only if it has content.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Keys are "Type <memTypeIndex>", values are arrays of allocations.
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        // Default pools: one block vector per memory type, printed only if non-empty.
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Keys are pool IDs.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13501 
    13502 #endif // #if VMA_STATS_STRING_ENABLED
    13503 
    13505 // Public interface
    13506 
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    // Two-phase construction: allocate with the caller's callbacks, then Init(),
    // whose VkResult is returned to the caller.
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13516 
    13517 void vmaDestroyAllocator(
    13518  VmaAllocator allocator)
    13519 {
    13520  if(allocator != VK_NULL_HANDLE)
    13521  {
    13522  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13523  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13524  vma_delete(&allocationCallbacks, allocator);
    13525  }
    13526 }
    13527 
    13529  VmaAllocator allocator,
    13530  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13531 {
    13532  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13533  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13534 }
    13535 
    13537  VmaAllocator allocator,
    13538  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13539 {
    13540  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13541  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13542 }
    13543 
    13545  VmaAllocator allocator,
    13546  uint32_t memoryTypeIndex,
    13547  VkMemoryPropertyFlags* pFlags)
    13548 {
    13549  VMA_ASSERT(allocator && pFlags);
    13550  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13551  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13552 }
    13553 
    13555  VmaAllocator allocator,
    13556  uint32_t frameIndex)
    13557 {
    13558  VMA_ASSERT(allocator);
    13559  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13560 
    13561  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13562 
    13563  allocator->SetCurrentFrameIndex(frameIndex);
    13564 }
    13565 
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    // Fills *pStats with aggregated statistics over all memory owned by the allocator.
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13574 
    13575 #if VMA_STATS_STRING_ENABLED
    13576 
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    // Builds a JSON string describing the allocator's current state.
    // The returned string is owned by the caller and must be released
    // with vmaFreeStatsString().
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One "Heap N" object per memory heap, with nested "Type N" objects
        // for the memory types that live in that heap.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only when the heap actually holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Decode the memory type's property flags into readable names.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built text into a NUL-terminated buffer allocated through
    // the allocator's callbacks, so vmaFreeStatsString can release it.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13684 
    13685 void vmaFreeStatsString(
    13686  VmaAllocator allocator,
    13687  char* pStatsString)
    13688 {
    13689  if(pStatsString != VMA_NULL)
    13690  {
    13691  VMA_ASSERT(allocator);
    13692  size_t len = strlen(pStatsString);
    13693  vma_delete_array(allocator, pStatsString, len + 1);
    13694  }
    13695 }
    13696 
    13697 #endif // #if VMA_STATS_STRING_ENABLED
    13698 
    13699 /*
    13700 This function is not protected by any mutex because it just reads immutable data.
    13701 */
    13702 VkResult vmaFindMemoryTypeIndex(
    13703  VmaAllocator allocator,
    13704  uint32_t memoryTypeBits,
    13705  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13706  uint32_t* pMemoryTypeIndex)
    13707 {
    13708  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13709  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13710  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13711 
    13712  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13713  {
    13714  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13715  }
    13716 
    13717  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13718  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13719 
    13720  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13721  if(mapped)
    13722  {
    13723  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13724  }
    13725 
    13726  // Convert usage to requiredFlags and preferredFlags.
    13727  switch(pAllocationCreateInfo->usage)
    13728  {
    13730  break;
    13732  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13733  {
    13734  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13735  }
    13736  break;
    13738  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13739  break;
    13741  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13742  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13743  {
    13744  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13745  }
    13746  break;
    13748  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13749  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13750  break;
    13751  default:
    13752  break;
    13753  }
    13754 
    13755  *pMemoryTypeIndex = UINT32_MAX;
    13756  uint32_t minCost = UINT32_MAX;
    13757  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13758  memTypeIndex < allocator->GetMemoryTypeCount();
    13759  ++memTypeIndex, memTypeBit <<= 1)
    13760  {
    13761  // This memory type is acceptable according to memoryTypeBits bitmask.
    13762  if((memTypeBit & memoryTypeBits) != 0)
    13763  {
    13764  const VkMemoryPropertyFlags currFlags =
    13765  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13766  // This memory type contains requiredFlags.
    13767  if((requiredFlags & ~currFlags) == 0)
    13768  {
    13769  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13770  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13771  // Remember memory type with lowest cost.
    13772  if(currCost < minCost)
    13773  {
    13774  *pMemoryTypeIndex = memTypeIndex;
    13775  if(currCost == 0)
    13776  {
    13777  return VK_SUCCESS;
    13778  }
    13779  minCost = currCost;
    13780  }
    13781  }
    13782  }
    13783  }
    13784  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13785 }
    13786 
    13788  VmaAllocator allocator,
    13789  const VkBufferCreateInfo* pBufferCreateInfo,
    13790  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13791  uint32_t* pMemoryTypeIndex)
    13792 {
    13793  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13794  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13795  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13796  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13797 
    13798  const VkDevice hDev = allocator->m_hDevice;
    13799  VkBuffer hBuffer = VK_NULL_HANDLE;
    13800  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13801  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13802  if(res == VK_SUCCESS)
    13803  {
    13804  VkMemoryRequirements memReq = {};
    13805  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13806  hDev, hBuffer, &memReq);
    13807 
    13808  res = vmaFindMemoryTypeIndex(
    13809  allocator,
    13810  memReq.memoryTypeBits,
    13811  pAllocationCreateInfo,
    13812  pMemoryTypeIndex);
    13813 
    13814  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13815  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13816  }
    13817  return res;
    13818 }
    13819 
    13821  VmaAllocator allocator,
    13822  const VkImageCreateInfo* pImageCreateInfo,
    13823  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13824  uint32_t* pMemoryTypeIndex)
    13825 {
    13826  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13827  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13828  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13829  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13830 
    13831  const VkDevice hDev = allocator->m_hDevice;
    13832  VkImage hImage = VK_NULL_HANDLE;
    13833  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13834  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13835  if(res == VK_SUCCESS)
    13836  {
    13837  VkMemoryRequirements memReq = {};
    13838  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13839  hDev, hImage, &memReq);
    13840 
    13841  res = vmaFindMemoryTypeIndex(
    13842  allocator,
    13843  memReq.memoryTypeBits,
    13844  pAllocationCreateInfo,
    13845  pMemoryTypeIndex);
    13846 
    13847  allocator->GetVulkanFunctions().vkDestroyImage(
    13848  hDev, hImage, allocator->GetAllocationCallbacks());
    13849  }
    13850  return res;
    13851 }
    13852 
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    // Public entry point: creates a custom memory pool.
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    // Record the call for later replay, if recording is active.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
    13875 
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    // Destroying a null pool is a valid no-op.
    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before destruction, while the pool handle is still valid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
    13900 
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    // Public entry point: retrieves statistics of a single custom pool.
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
    13912 
    13914  VmaAllocator allocator,
    13915  VmaPool pool,
    13916  size_t* pLostAllocationCount)
    13917 {
    13918  VMA_ASSERT(allocator && pool);
    13919 
    13920  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13921 
    13922 #if VMA_RECORDING_ENABLED
    13923  if(allocator->GetRecorder() != VMA_NULL)
    13924  {
    13925  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13926  }
    13927 #endif
    13928 
    13929  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13930 }
    13931 
    13932 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13933 {
    13934  VMA_ASSERT(allocator && pool);
    13935 
    13936  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13937 
    13938  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13939 
    13940  return allocator->CheckPoolCorruption(pool);
    13941 }
    13942 
    13943 VkResult vmaAllocateMemory(
    13944  VmaAllocator allocator,
    13945  const VkMemoryRequirements* pVkMemoryRequirements,
    13946  const VmaAllocationCreateInfo* pCreateInfo,
    13947  VmaAllocation* pAllocation,
    13948  VmaAllocationInfo* pAllocationInfo)
    13949 {
    13950  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13951 
    13952  VMA_DEBUG_LOG("vmaAllocateMemory");
    13953 
    13954  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13955 
    13956  VkResult result = allocator->AllocateMemory(
    13957  *pVkMemoryRequirements,
    13958  false, // requiresDedicatedAllocation
    13959  false, // prefersDedicatedAllocation
    13960  VK_NULL_HANDLE, // dedicatedBuffer
    13961  VK_NULL_HANDLE, // dedicatedImage
    13962  *pCreateInfo,
    13963  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13964  pAllocation);
    13965 
    13966 #if VMA_RECORDING_ENABLED
    13967  if(allocator->GetRecorder() != VMA_NULL)
    13968  {
    13969  allocator->GetRecorder()->RecordAllocateMemory(
    13970  allocator->GetCurrentFrameIndex(),
    13971  *pVkMemoryRequirements,
    13972  *pCreateInfo,
    13973  *pAllocation);
    13974  }
    13975 #endif
    13976 
    13977  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13978  {
    13979  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13980  }
    13981 
    13982  return result;
    13983 }
    13984 
    13986  VmaAllocator allocator,
    13987  VkBuffer buffer,
    13988  const VmaAllocationCreateInfo* pCreateInfo,
    13989  VmaAllocation* pAllocation,
    13990  VmaAllocationInfo* pAllocationInfo)
    13991 {
    13992  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13993 
    13994  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13995 
    13996  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13997 
    13998  VkMemoryRequirements vkMemReq = {};
    13999  bool requiresDedicatedAllocation = false;
    14000  bool prefersDedicatedAllocation = false;
    14001  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14002  requiresDedicatedAllocation,
    14003  prefersDedicatedAllocation);
    14004 
    14005  VkResult result = allocator->AllocateMemory(
    14006  vkMemReq,
    14007  requiresDedicatedAllocation,
    14008  prefersDedicatedAllocation,
    14009  buffer, // dedicatedBuffer
    14010  VK_NULL_HANDLE, // dedicatedImage
    14011  *pCreateInfo,
    14012  VMA_SUBALLOCATION_TYPE_BUFFER,
    14013  pAllocation);
    14014 
    14015 #if VMA_RECORDING_ENABLED
    14016  if(allocator->GetRecorder() != VMA_NULL)
    14017  {
    14018  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14019  allocator->GetCurrentFrameIndex(),
    14020  vkMemReq,
    14021  requiresDedicatedAllocation,
    14022  prefersDedicatedAllocation,
    14023  *pCreateInfo,
    14024  *pAllocation);
    14025  }
    14026 #endif
    14027 
    14028  if(pAllocationInfo && result == VK_SUCCESS)
    14029  {
    14030  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14031  }
    14032 
    14033  return result;
    14034 }
    14035 
    14036 VkResult vmaAllocateMemoryForImage(
    14037  VmaAllocator allocator,
    14038  VkImage image,
    14039  const VmaAllocationCreateInfo* pCreateInfo,
    14040  VmaAllocation* pAllocation,
    14041  VmaAllocationInfo* pAllocationInfo)
    14042 {
    14043  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14044 
    14045  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    14046 
    14047  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14048 
    14049  VkMemoryRequirements vkMemReq = {};
    14050  bool requiresDedicatedAllocation = false;
    14051  bool prefersDedicatedAllocation = false;
    14052  allocator->GetImageMemoryRequirements(image, vkMemReq,
    14053  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14054 
    14055  VkResult result = allocator->AllocateMemory(
    14056  vkMemReq,
    14057  requiresDedicatedAllocation,
    14058  prefersDedicatedAllocation,
    14059  VK_NULL_HANDLE, // dedicatedBuffer
    14060  image, // dedicatedImage
    14061  *pCreateInfo,
    14062  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    14063  pAllocation);
    14064 
    14065 #if VMA_RECORDING_ENABLED
    14066  if(allocator->GetRecorder() != VMA_NULL)
    14067  {
    14068  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    14069  allocator->GetCurrentFrameIndex(),
    14070  vkMemReq,
    14071  requiresDedicatedAllocation,
    14072  prefersDedicatedAllocation,
    14073  *pCreateInfo,
    14074  *pAllocation);
    14075  }
    14076 #endif
    14077 
    14078  if(pAllocationInfo && result == VK_SUCCESS)
    14079  {
    14080  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14081  }
    14082 
    14083  return result;
    14084 }
    14085 
    14086 void vmaFreeMemory(
    14087  VmaAllocator allocator,
    14088  VmaAllocation allocation)
    14089 {
    14090  VMA_ASSERT(allocator);
    14091 
    14092  if(allocation == VK_NULL_HANDLE)
    14093  {
    14094  return;
    14095  }
    14096 
    14097  VMA_DEBUG_LOG("vmaFreeMemory");
    14098 
    14099  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14100 
    14101 #if VMA_RECORDING_ENABLED
    14102  if(allocator->GetRecorder() != VMA_NULL)
    14103  {
    14104  allocator->GetRecorder()->RecordFreeMemory(
    14105  allocator->GetCurrentFrameIndex(),
    14106  allocation);
    14107  }
    14108 #endif
    14109 
    14110  allocator->FreeMemory(allocation);
    14111 }
    14112 
    14113 VkResult vmaResizeAllocation(
    14114  VmaAllocator allocator,
    14115  VmaAllocation allocation,
    14116  VkDeviceSize newSize)
    14117 {
    14118  VMA_ASSERT(allocator && allocation);
    14119 
    14120  VMA_DEBUG_LOG("vmaResizeAllocation");
    14121 
    14122  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14123 
    14124 #if VMA_RECORDING_ENABLED
    14125  if(allocator->GetRecorder() != VMA_NULL)
    14126  {
    14127  allocator->GetRecorder()->RecordResizeAllocation(
    14128  allocator->GetCurrentFrameIndex(),
    14129  allocation,
    14130  newSize);
    14131  }
    14132 #endif
    14133 
    14134  return allocator->ResizeAllocation(allocation, newSize);
    14135 }
    14136 
    14138  VmaAllocator allocator,
    14139  VmaAllocation allocation,
    14140  VmaAllocationInfo* pAllocationInfo)
    14141 {
    14142  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14143 
    14144  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14145 
    14146 #if VMA_RECORDING_ENABLED
    14147  if(allocator->GetRecorder() != VMA_NULL)
    14148  {
    14149  allocator->GetRecorder()->RecordGetAllocationInfo(
    14150  allocator->GetCurrentFrameIndex(),
    14151  allocation);
    14152  }
    14153 #endif
    14154 
    14155  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14156 }
    14157 
    14158 VkBool32 vmaTouchAllocation(
    14159  VmaAllocator allocator,
    14160  VmaAllocation allocation)
    14161 {
    14162  VMA_ASSERT(allocator && allocation);
    14163 
    14164  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14165 
    14166 #if VMA_RECORDING_ENABLED
    14167  if(allocator->GetRecorder() != VMA_NULL)
    14168  {
    14169  allocator->GetRecorder()->RecordTouchAllocation(
    14170  allocator->GetCurrentFrameIndex(),
    14171  allocation);
    14172  }
    14173 #endif
    14174 
    14175  return allocator->TouchAllocation(allocation);
    14176 }
    14177 
    14179  VmaAllocator allocator,
    14180  VmaAllocation allocation,
    14181  void* pUserData)
    14182 {
    14183  VMA_ASSERT(allocator && allocation);
    14184 
    14185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14186 
    14187  allocation->SetUserData(allocator, pUserData);
    14188 
    14189 #if VMA_RECORDING_ENABLED
    14190  if(allocator->GetRecorder() != VMA_NULL)
    14191  {
    14192  allocator->GetRecorder()->RecordSetAllocationUserData(
    14193  allocator->GetCurrentFrameIndex(),
    14194  allocation,
    14195  pUserData);
    14196  }
    14197 #endif
    14198 }
    14199 
    14201  VmaAllocator allocator,
    14202  VmaAllocation* pAllocation)
    14203 {
    14204  VMA_ASSERT(allocator && pAllocation);
    14205 
    14206  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14207 
    14208  allocator->CreateLostAllocation(pAllocation);
    14209 
    14210 #if VMA_RECORDING_ENABLED
    14211  if(allocator->GetRecorder() != VMA_NULL)
    14212  {
    14213  allocator->GetRecorder()->RecordCreateLostAllocation(
    14214  allocator->GetCurrentFrameIndex(),
    14215  *pAllocation);
    14216  }
    14217 #endif
    14218 }
    14219 
    14220 VkResult vmaMapMemory(
    14221  VmaAllocator allocator,
    14222  VmaAllocation allocation,
    14223  void** ppData)
    14224 {
    14225  VMA_ASSERT(allocator && allocation && ppData);
    14226 
    14227  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14228 
    14229  VkResult res = allocator->Map(allocation, ppData);
    14230 
    14231 #if VMA_RECORDING_ENABLED
    14232  if(allocator->GetRecorder() != VMA_NULL)
    14233  {
    14234  allocator->GetRecorder()->RecordMapMemory(
    14235  allocator->GetCurrentFrameIndex(),
    14236  allocation);
    14237  }
    14238 #endif
    14239 
    14240  return res;
    14241 }
    14242 
    14243 void vmaUnmapMemory(
    14244  VmaAllocator allocator,
    14245  VmaAllocation allocation)
    14246 {
    14247  VMA_ASSERT(allocator && allocation);
    14248 
    14249  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14250 
    14251 #if VMA_RECORDING_ENABLED
    14252  if(allocator->GetRecorder() != VMA_NULL)
    14253  {
    14254  allocator->GetRecorder()->RecordUnmapMemory(
    14255  allocator->GetCurrentFrameIndex(),
    14256  allocation);
    14257  }
    14258 #endif
    14259 
    14260  allocator->Unmap(allocation);
    14261 }
    14262 
    14263 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14264 {
    14265  VMA_ASSERT(allocator && allocation);
    14266 
    14267  VMA_DEBUG_LOG("vmaFlushAllocation");
    14268 
    14269  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14270 
    14271  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14272 
    14273 #if VMA_RECORDING_ENABLED
    14274  if(allocator->GetRecorder() != VMA_NULL)
    14275  {
    14276  allocator->GetRecorder()->RecordFlushAllocation(
    14277  allocator->GetCurrentFrameIndex(),
    14278  allocation, offset, size);
    14279  }
    14280 #endif
    14281 }
    14282 
    14283 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14284 {
    14285  VMA_ASSERT(allocator && allocation);
    14286 
    14287  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14288 
    14289  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14290 
    14291  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14292 
    14293 #if VMA_RECORDING_ENABLED
    14294  if(allocator->GetRecorder() != VMA_NULL)
    14295  {
    14296  allocator->GetRecorder()->RecordInvalidateAllocation(
    14297  allocator->GetCurrentFrameIndex(),
    14298  allocation, offset, size);
    14299  }
    14300 #endif
    14301 }
    14302 
    14303 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14304 {
    14305  VMA_ASSERT(allocator);
    14306 
    14307  VMA_DEBUG_LOG("vmaCheckCorruption");
    14308 
    14309  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14310 
    14311  return allocator->CheckCorruption(memoryTypeBits);
    14312 }
    14313 
    14314 VkResult vmaDefragment(
    14315  VmaAllocator allocator,
    14316  VmaAllocation* pAllocations,
    14317  size_t allocationCount,
    14318  VkBool32* pAllocationsChanged,
    14319  const VmaDefragmentationInfo *pDefragmentationInfo,
    14320  VmaDefragmentationStats* pDefragmentationStats)
    14321 {
    14322  VMA_ASSERT(allocator && pAllocations);
    14323 
    14324  VMA_DEBUG_LOG("vmaDefragment");
    14325 
    14326  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14327 
    14328  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14329 }
    14330 
    14331 VkResult vmaBindBufferMemory(
    14332  VmaAllocator allocator,
    14333  VmaAllocation allocation,
    14334  VkBuffer buffer)
    14335 {
    14336  VMA_ASSERT(allocator && allocation && buffer);
    14337 
    14338  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14339 
    14340  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14341 
    14342  return allocator->BindBufferMemory(allocation, buffer);
    14343 }
    14344 
    14345 VkResult vmaBindImageMemory(
    14346  VmaAllocator allocator,
    14347  VmaAllocation allocation,
    14348  VkImage image)
    14349 {
    14350  VMA_ASSERT(allocator && allocation && image);
    14351 
    14352  VMA_DEBUG_LOG("vmaBindImageMemory");
    14353 
    14354  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14355 
    14356  return allocator->BindImageMemory(allocation, image);
    14357 }
    14358 
    14359 VkResult vmaCreateBuffer(
    14360  VmaAllocator allocator,
    14361  const VkBufferCreateInfo* pBufferCreateInfo,
    14362  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14363  VkBuffer* pBuffer,
    14364  VmaAllocation* pAllocation,
    14365  VmaAllocationInfo* pAllocationInfo)
    14366 {
    14367  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14368 
    14369  if(pBufferCreateInfo->size == 0)
    14370  {
    14371  return VK_ERROR_VALIDATION_FAILED_EXT;
    14372  }
    14373 
    14374  VMA_DEBUG_LOG("vmaCreateBuffer");
    14375 
    14376  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14377 
    14378  *pBuffer = VK_NULL_HANDLE;
    14379  *pAllocation = VK_NULL_HANDLE;
    14380 
    14381  // 1. Create VkBuffer.
    14382  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14383  allocator->m_hDevice,
    14384  pBufferCreateInfo,
    14385  allocator->GetAllocationCallbacks(),
    14386  pBuffer);
    14387  if(res >= 0)
    14388  {
    14389  // 2. vkGetBufferMemoryRequirements.
    14390  VkMemoryRequirements vkMemReq = {};
    14391  bool requiresDedicatedAllocation = false;
    14392  bool prefersDedicatedAllocation = false;
    14393  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14394  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14395 
    14396  // Make sure alignment requirements for specific buffer usages reported
    14397  // in Physical Device Properties are included in alignment reported by memory requirements.
    14398  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14399  {
    14400  VMA_ASSERT(vkMemReq.alignment %
    14401  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14402  }
    14403  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14404  {
    14405  VMA_ASSERT(vkMemReq.alignment %
    14406  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14407  }
    14408  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14409  {
    14410  VMA_ASSERT(vkMemReq.alignment %
    14411  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14412  }
    14413 
    14414  // 3. Allocate memory using allocator.
    14415  res = allocator->AllocateMemory(
    14416  vkMemReq,
    14417  requiresDedicatedAllocation,
    14418  prefersDedicatedAllocation,
    14419  *pBuffer, // dedicatedBuffer
    14420  VK_NULL_HANDLE, // dedicatedImage
    14421  *pAllocationCreateInfo,
    14422  VMA_SUBALLOCATION_TYPE_BUFFER,
    14423  pAllocation);
    14424 
    14425 #if VMA_RECORDING_ENABLED
    14426  if(allocator->GetRecorder() != VMA_NULL)
    14427  {
    14428  allocator->GetRecorder()->RecordCreateBuffer(
    14429  allocator->GetCurrentFrameIndex(),
    14430  *pBufferCreateInfo,
    14431  *pAllocationCreateInfo,
    14432  *pAllocation);
    14433  }
    14434 #endif
    14435 
    14436  if(res >= 0)
    14437  {
    14438  // 3. Bind buffer with memory.
    14439  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14440  if(res >= 0)
    14441  {
    14442  // All steps succeeded.
    14443  #if VMA_STATS_STRING_ENABLED
    14444  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14445  #endif
    14446  if(pAllocationInfo != VMA_NULL)
    14447  {
    14448  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14449  }
    14450 
    14451  return VK_SUCCESS;
    14452  }
    14453  allocator->FreeMemory(*pAllocation);
    14454  *pAllocation = VK_NULL_HANDLE;
    14455  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14456  *pBuffer = VK_NULL_HANDLE;
    14457  return res;
    14458  }
    14459  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14460  *pBuffer = VK_NULL_HANDLE;
    14461  return res;
    14462  }
    14463  return res;
    14464 }
    14465 
    14466 void vmaDestroyBuffer(
    14467  VmaAllocator allocator,
    14468  VkBuffer buffer,
    14469  VmaAllocation allocation)
    14470 {
    14471  VMA_ASSERT(allocator);
    14472 
    14473  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14474  {
    14475  return;
    14476  }
    14477 
    14478  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14479 
    14480  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14481 
    14482 #if VMA_RECORDING_ENABLED
    14483  if(allocator->GetRecorder() != VMA_NULL)
    14484  {
    14485  allocator->GetRecorder()->RecordDestroyBuffer(
    14486  allocator->GetCurrentFrameIndex(),
    14487  allocation);
    14488  }
    14489 #endif
    14490 
    14491  if(buffer != VK_NULL_HANDLE)
    14492  {
    14493  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14494  }
    14495 
    14496  if(allocation != VK_NULL_HANDLE)
    14497  {
    14498  allocator->FreeMemory(allocation);
    14499  }
    14500 }
    14501 
    14502 VkResult vmaCreateImage(
    14503  VmaAllocator allocator,
    14504  const VkImageCreateInfo* pImageCreateInfo,
    14505  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14506  VkImage* pImage,
    14507  VmaAllocation* pAllocation,
    14508  VmaAllocationInfo* pAllocationInfo)
    14509 {
    14510  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14511 
    14512  if(pImageCreateInfo->extent.width == 0 ||
    14513  pImageCreateInfo->extent.height == 0 ||
    14514  pImageCreateInfo->extent.depth == 0 ||
    14515  pImageCreateInfo->mipLevels == 0 ||
    14516  pImageCreateInfo->arrayLayers == 0)
    14517  {
    14518  return VK_ERROR_VALIDATION_FAILED_EXT;
    14519  }
    14520 
    14521  VMA_DEBUG_LOG("vmaCreateImage");
    14522 
    14523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14524 
    14525  *pImage = VK_NULL_HANDLE;
    14526  *pAllocation = VK_NULL_HANDLE;
    14527 
    14528  // 1. Create VkImage.
    14529  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14530  allocator->m_hDevice,
    14531  pImageCreateInfo,
    14532  allocator->GetAllocationCallbacks(),
    14533  pImage);
    14534  if(res >= 0)
    14535  {
    14536  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14537  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14538  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14539 
    14540  // 2. Allocate memory using allocator.
    14541  VkMemoryRequirements vkMemReq = {};
    14542  bool requiresDedicatedAllocation = false;
    14543  bool prefersDedicatedAllocation = false;
    14544  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14545  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14546 
    14547  res = allocator->AllocateMemory(
    14548  vkMemReq,
    14549  requiresDedicatedAllocation,
    14550  prefersDedicatedAllocation,
    14551  VK_NULL_HANDLE, // dedicatedBuffer
    14552  *pImage, // dedicatedImage
    14553  *pAllocationCreateInfo,
    14554  suballocType,
    14555  pAllocation);
    14556 
    14557 #if VMA_RECORDING_ENABLED
    14558  if(allocator->GetRecorder() != VMA_NULL)
    14559  {
    14560  allocator->GetRecorder()->RecordCreateImage(
    14561  allocator->GetCurrentFrameIndex(),
    14562  *pImageCreateInfo,
    14563  *pAllocationCreateInfo,
    14564  *pAllocation);
    14565  }
    14566 #endif
    14567 
    14568  if(res >= 0)
    14569  {
    14570  // 3. Bind image with memory.
    14571  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14572  if(res >= 0)
    14573  {
    14574  // All steps succeeded.
    14575  #if VMA_STATS_STRING_ENABLED
    14576  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14577  #endif
    14578  if(pAllocationInfo != VMA_NULL)
    14579  {
    14580  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14581  }
    14582 
    14583  return VK_SUCCESS;
    14584  }
    14585  allocator->FreeMemory(*pAllocation);
    14586  *pAllocation = VK_NULL_HANDLE;
    14587  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14588  *pImage = VK_NULL_HANDLE;
    14589  return res;
    14590  }
    14591  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14592  *pImage = VK_NULL_HANDLE;
    14593  return res;
    14594  }
    14595  return res;
    14596 }
    14597 
    14598 void vmaDestroyImage(
    14599  VmaAllocator allocator,
    14600  VkImage image,
    14601  VmaAllocation allocation)
    14602 {
    14603  VMA_ASSERT(allocator);
    14604 
    14605  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14606  {
    14607  return;
    14608  }
    14609 
    14610  VMA_DEBUG_LOG("vmaDestroyImage");
    14611 
    14612  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14613 
    14614 #if VMA_RECORDING_ENABLED
    14615  if(allocator->GetRecorder() != VMA_NULL)
    14616  {
    14617  allocator->GetRecorder()->RecordDestroyImage(
    14618  allocator->GetCurrentFrameIndex(),
    14619  allocation);
    14620  }
    14621 #endif
    14622 
    14623  if(image != VK_NULL_HANDLE)
    14624  {
    14625  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14626  }
    14627  if(allocation != VK_NULL_HANDLE)
    14628  {
    14629  allocator->FreeMemory(allocation);
    14630  }
    14631 }
    14632 
    14633 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1584
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1885
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1641
    @@ -82,7 +82,7 @@ $(function() {
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1588
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2307
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1638
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2552
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2577
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2096
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1485
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    @@ -102,13 +102,13 @@ $(function() {
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1775
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1593
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1774
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2556
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2581
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1667
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1784
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2564
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2589
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1979
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2547
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2572
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1594
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1519
    Represents main object of this library initialized.
    @@ -131,10 +131,10 @@ $(function() {
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1820
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2542
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2567
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2560
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2585
    Definition: vk_mem_alloc.h:1859
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2003
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1592
    @@ -151,7 +151,7 @@ $(function() {
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1617
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1551
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2562
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2587
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1990
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2204
    @@ -210,7 +210,7 @@ $(function() {
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2312
    Definition: vk_mem_alloc.h:1960
    Definition: vk_mem_alloc.h:1972
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2558
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2583
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1583
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1770
    @@ -230,6 +230,7 @@ $(function() {
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1586
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2113
    +
    VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
    Tries to resize an allocation in place, if there is enough free memory after it.
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2293
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    diff --git a/src/Tests.cpp b/src/Tests.cpp index e339f9c..f7536c4 100644 --- a/src/Tests.cpp +++ b/src/Tests.cpp @@ -2711,6 +2711,159 @@ static void TestPool_SameSize() vmaDestroyPool(g_hAllocator, pool); } +static void TestResize() +{ + wprintf(L"Testing vmaResizeAllocation...\n"); + + const VkDeviceSize KILOBYTE = 1024ull; + const VkDeviceSize MEGABYTE = KILOBYTE * 1024; + + VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; + bufCreateInfo.size = 2 * MEGABYTE; + bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; + + uint32_t memTypeIndex = UINT32_MAX; + TEST( vmaFindMemoryTypeIndexForBufferInfo(g_hAllocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex) == VK_SUCCESS ); + + VmaPoolCreateInfo poolCreateInfo = {}; + poolCreateInfo.flags = VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT; + poolCreateInfo.blockSize = 8 * MEGABYTE; + poolCreateInfo.minBlockCount = 1; + poolCreateInfo.maxBlockCount = 1; + poolCreateInfo.memoryTypeIndex = memTypeIndex; + + VmaPool pool; + TEST( vmaCreatePool(g_hAllocator, &poolCreateInfo, &pool) == VK_SUCCESS ); + + allocCreateInfo.pool = pool; + + // Fill 8 MB pool with 4 * 2 MB allocations. + VmaAllocation allocs[4] = {}; + + VkMemoryRequirements memReq = {}; + memReq.memoryTypeBits = UINT32_MAX; + memReq.alignment = 4; + memReq.size = bufCreateInfo.size; + + VmaAllocationInfo allocInfo = {}; + + for(uint32_t i = 0; i < 4; ++i) + { + TEST( vmaAllocateMemory(g_hAllocator, &memReq, &allocCreateInfo, &allocs[i], nullptr) == VK_SUCCESS ); + } + + // Now it's: a0 2MB, a1 2MB, a2 2MB, a3 2MB + + // Case: Resize to the same size always succeeds. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[0], 2 * MEGABYTE) == VK_SUCCESS); + vmaGetAllocationInfo(g_hAllocator, allocs[3], &allocInfo); + TEST(allocInfo.size == 2ull * 1024 * 1024); + } + + // Case: Shrink allocation at the end. 
+ { + TEST( vmaResizeAllocation(g_hAllocator, allocs[3], 1 * MEGABYTE) == VK_SUCCESS ); + vmaGetAllocationInfo(g_hAllocator, allocs[3], &allocInfo); + TEST(allocInfo.size == 1ull * 1024 * 1024); + } + + // Now it's: a0 2MB, a1 2MB, a2 2MB, a3 1MB, free 1MB + + // Case: Shrink allocation before free space. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[3], 512 * KILOBYTE) == VK_SUCCESS ); + vmaGetAllocationInfo(g_hAllocator, allocs[3], &allocInfo); + TEST(allocInfo.size == 512 * KILOBYTE); + } + + // Now it's: a0 2MB, a1 2MB, a2 2MB, a3 0.5MB, free 1.5MB + + // Case: Shrink allocation before next allocation. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[0], 1 * MEGABYTE) == VK_SUCCESS ); + vmaGetAllocationInfo(g_hAllocator, allocs[0], &allocInfo); + TEST(allocInfo.size == 1 * MEGABYTE); + } + + // Now it's: a0 1MB, free 1 MB, a1 2MB, a2 2MB, a3 0.5MB, free 1.5MB + + // Case: Grow allocation while there is even more space available. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[3], 1 * MEGABYTE) == VK_SUCCESS ); + vmaGetAllocationInfo(g_hAllocator, allocs[3], &allocInfo); + TEST(allocInfo.size == 1 * MEGABYTE); + } + + // Now it's: a0 1MB, free 1 MB, a1 2MB, a2 2MB, a3 1MB, free 1MB + + // Case: Grow allocation while there is exact amount of free space available. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[0], 2 * MEGABYTE) == VK_SUCCESS ); + vmaGetAllocationInfo(g_hAllocator, allocs[0], &allocInfo); + TEST(allocInfo.size == 2 * MEGABYTE); + } + + // Now it's: a0 2MB, a1 2MB, a2 2MB, a3 1MB, free 1MB + + // Case: Fail to grow when there is not enough free space due to next allocation. + { + TEST( vmaResizeAllocation(g_hAllocator, allocs[0], 3 * MEGABYTE) == VK_ERROR_OUT_OF_POOL_MEMORY ); + vmaGetAllocationInfo(g_hAllocator, allocs[0], &allocInfo); + TEST(allocInfo.size == 2 * MEGABYTE); + } + + // Case: Fail to grow when there is not enough free space due to end of memory block. 
+ { + TEST( vmaResizeAllocation(g_hAllocator, allocs[3], 3 * MEGABYTE) == VK_ERROR_OUT_OF_POOL_MEMORY ); + vmaGetAllocationInfo(g_hAllocator, allocs[3], &allocInfo); + TEST(allocInfo.size == 1 * MEGABYTE); + } + + for(uint32_t i = 4; i--; ) + { + vmaFreeMemory(g_hAllocator, allocs[i]); + } + + vmaDestroyPool(g_hAllocator, pool); + + // Test dedicated allocation + { + VmaAllocationCreateInfo dedicatedAllocCreateInfo = {}; + dedicatedAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; + dedicatedAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + + VmaAllocation dedicatedAlloc = VK_NULL_HANDLE; + TEST( vmaAllocateMemory(g_hAllocator, &memReq, &dedicatedAllocCreateInfo, &dedicatedAlloc, nullptr) == VK_SUCCESS ); + + // Case: Resize to the same size always succeeds. + { + TEST( vmaResizeAllocation(g_hAllocator, dedicatedAlloc, 2 * MEGABYTE) == VK_SUCCESS); + vmaGetAllocationInfo(g_hAllocator, dedicatedAlloc, &allocInfo); + TEST(allocInfo.size == 2ull * 1024 * 1024); + } + + // Case: Shrinking fails. + { + TEST( vmaResizeAllocation(g_hAllocator, dedicatedAlloc, 1 * MEGABYTE) < VK_SUCCESS); + vmaGetAllocationInfo(g_hAllocator, dedicatedAlloc, &allocInfo); + TEST(allocInfo.size == 2ull * 1024 * 1024); + } + + // Case: Growing fails. 
+ { + TEST( vmaResizeAllocation(g_hAllocator, dedicatedAlloc, 3 * MEGABYTE) < VK_SUCCESS); + vmaGetAllocationInfo(g_hAllocator, dedicatedAlloc, &allocInfo); + TEST(allocInfo.size == 2ull * 1024 * 1024); + } + + vmaFreeMemory(g_hAllocator, dedicatedAlloc); + } +} + static bool ValidatePattern(const void* pMemory, size_t size, uint8_t pattern) { const uint8_t* pBytes = (const uint8_t*)pMemory; @@ -4275,7 +4428,7 @@ void Test() // ######################################## // ######################################## - BasicTestBuddyAllocator(); + TestResize(); return; } @@ -4287,6 +4440,7 @@ void Test() #else TestPool_SameSize(); TestHeapSizeLimit(); + TestResize(); #endif #if VMA_DEBUG_INITIALIZE_ALLOCATIONS TestAllocationsInitialization(); diff --git a/src/VmaReplay/VmaReplay.cpp b/src/VmaReplay/VmaReplay.cpp index 9635b03..a9acfa5 100644 --- a/src/VmaReplay/VmaReplay.cpp +++ b/src/VmaReplay/VmaReplay.cpp @@ -82,6 +82,7 @@ enum class VMA_FUNCTION TouchAllocation, GetAllocationInfo, MakePoolAllocationsLost, + ResizeAllocation, Count }; static const char* VMA_FUNCTION_NAMES[] = { @@ -104,6 +105,7 @@ static const char* VMA_FUNCTION_NAMES[] = { "vmaTouchAllocation", "vmaGetAllocationInfo", "vmaMakePoolAllocationsLost", + "vmaResizeAllocation", }; static_assert( _countof(VMA_FUNCTION_NAMES) == (size_t)VMA_FUNCTION::Count, @@ -143,7 +145,7 @@ static size_t g_DumpStatsAfterLineNextIndex = 0; static bool ValidateFileVersion() { if(GetVersionMajor(g_FileVersion) == 1 && - GetVersionMinor(g_FileVersion) <= 3) + GetVersionMinor(g_FileVersion) <= 4) { return true; } @@ -1015,6 +1017,7 @@ private: void ExecuteTouchAllocation(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteGetAllocationInfo(size_t lineNumber, const CsvSplit& csvSplit); void ExecuteMakePoolAllocationsLost(size_t lineNumber, const CsvSplit& csvSplit); + void ExecuteResizeAllocation(size_t lineNumber, const CsvSplit& csvSplit); void DestroyAllocation(size_t lineNumber, const CsvSplit& csvSplit); }; @@ 
-1156,6 +1159,8 @@ void Player::ExecuteLine(size_t lineNumber, const StrRange& line) ExecuteGetAllocationInfo(lineNumber, csvSplit); else if(StrRangeEq(functionName, "vmaMakePoolAllocationsLost")) ExecuteMakePoolAllocationsLost(lineNumber, csvSplit); + else if(StrRangeEq(functionName, "vmaResizeAllocation")) + ExecuteResizeAllocation(lineNumber, csvSplit); else { if(IssueWarning()) @@ -2599,6 +2604,45 @@ void Player::ExecuteMakePoolAllocationsLost(size_t lineNumber, const CsvSplit& c } } +void Player::ExecuteResizeAllocation(size_t lineNumber, const CsvSplit& csvSplit) +{ + m_Stats.RegisterFunctionCall(VMA_FUNCTION::ResizeAllocation); + + if(ValidateFunctionParameterCount(lineNumber, csvSplit, 2, false)) + { + uint64_t origPtr = 0; + uint64_t newSize = 0; + + if(StrRangeToPtr(csvSplit.GetRange(FIRST_PARAM_INDEX), origPtr) && + StrRangeToUint(csvSplit.GetRange(FIRST_PARAM_INDEX + 1), newSize)) + { + if(origPtr != 0) + { + const auto it = m_Allocations.find(origPtr); + if(it != m_Allocations.end()) + { + vmaResizeAllocation(m_Allocator, it->second.allocation, newSize); + UpdateMemStats(); + } + else + { + if(IssueWarning()) + { + printf("Line %zu: Allocation %llX not found.\n", lineNumber, origPtr); + } + } + } + } + else + { + if(IssueWarning()) + { + printf("Line %zu: Invalid parameters for vmaResizeAllocation.\n", lineNumber); + } + } + } +} + //////////////////////////////////////////////////////////////////////////////// // Main functions diff --git a/src/VulkanSample.cpp b/src/VulkanSample.cpp index 99727f3..8c7b2c6 100644 --- a/src/VulkanSample.cpp +++ b/src/VulkanSample.cpp @@ -1306,6 +1306,15 @@ static void InitializeApplication() allocatorInfo.pAllocationCallbacks = &cpuAllocationCallbacks; } + // Uncomment to enable recording to CSV file. 
+ /* + { + VmaRecordSettings recordSettings = {}; + recordSettings.pFilePath = "VulkanSample.csv"; + allocatorInfo.pRecordSettings = &recordSettings; + } + */ + ERR_GUARD_VULKAN( vmaCreateAllocator(&allocatorInfo, &g_hAllocator) ); // Retrieve queue (doesn't need to be destroyed) diff --git a/src/vk_mem_alloc.h b/src/vk_mem_alloc.h index 6fb57d0..6326324 100644 --- a/src/vk_mem_alloc.h +++ b/src/vk_mem_alloc.h @@ -2374,6 +2374,31 @@ void vmaFreeMemory( VmaAllocator allocator, VmaAllocation allocation); +/** \brief Tries to resize an allocation in place, if there is enough free memory after it. + +Tries to change allocation's size without moving or reallocating it. +You can both shrink and grow allocation size. +When growing, it succeeds only when the allocation belongs to a memory block with enough +free space after it. + +Returns `VK_SUCCESS` if allocation's size has been successfully changed. +Returns `VK_ERROR_OUT_OF_POOL_MEMORY` if allocation's size could not be changed. + +After successful call to this function, VmaAllocationInfo::size of this allocation changes. +All other parameters stay the same: memory pool and type, alignment, offset, mapped pointer. + +- Calling this function on allocation that is in lost state fails with result `VK_ERROR_VALIDATION_FAILED_EXT`. +- Calling this function with `newSize` same as current allocation size does nothing and returns `VK_SUCCESS`. +- Resizing dedicated allocations, as well as allocations created in pools that use linear + or buddy algorithm, is not supported. + The function returns `VK_ERROR_FEATURE_NOT_PRESENT` in such cases. + Support may be added in the future. +*/ +VkResult vmaResizeAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize newSize); + /** \brief Returns current information about specified allocation and atomically marks it as used in current frame. Current paramters of given allocation are returned in `pAllocationInfo`. 
@@ -4504,7 +4529,9 @@ public: void ChangeBlockAllocation( VmaAllocator hAllocator, VmaDeviceMemoryBlock* block, - VkDeviceSize offset); + VkDeviceSize offset); + + void ChangeSize(VkDeviceSize newSize); // pMappedData not null means allocation is created with MAPPED flag. void InitDedicatedAllocation( @@ -4766,6 +4793,9 @@ public: virtual void Free(const VmaAllocation allocation) = 0; virtual void FreeAtOffset(VkDeviceSize offset) = 0; + // Tries to resize (grow or shrink) space for given allocation, in place. + virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; } + protected: const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } @@ -4845,6 +4875,8 @@ public: virtual void Free(const VmaAllocation allocation); virtual void FreeAtOffset(VkDeviceSize offset); + virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize); + private: uint32_t m_FreeCount; VkDeviceSize m_SumFreeSize; @@ -5597,6 +5629,10 @@ public: VmaAllocation allocation); void RecordFreeMemory(uint32_t frameIndex, VmaAllocation allocation); + void RecordResizeAllocation( + uint32_t frameIndex, + VmaAllocation allocation, + VkDeviceSize newSize); void RecordSetAllocationUserData(uint32_t frameIndex, VmaAllocation allocation, const void* pUserData); @@ -5763,6 +5799,10 @@ public: // Main deallocation function. 
void FreeMemory(const VmaAllocation allocation); + VkResult ResizeAllocation( + const VmaAllocation alloc, + VkDeviceSize newSize); + void CalculateStats(VmaStats* pStats); #if VMA_STATS_STRING_ENABLED @@ -6296,6 +6336,12 @@ void VmaAllocation_T::ChangeBlockAllocation( m_BlockAllocation.m_Offset = offset; } +void VmaAllocation_T::ChangeSize(VkDeviceSize newSize) +{ + VMA_ASSERT(newSize > 0); + m_Size = newSize; +} + VkDeviceSize VmaAllocation_T::GetOffset() const { switch(m_Type) @@ -7222,6 +7268,133 @@ void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset) VMA_ASSERT(0 && "Not found!"); } +bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) +{ + typedef VmaSuballocationList::iterator iter_type; + for(iter_type suballocItem = m_Suballocations.begin(); + suballocItem != m_Suballocations.end(); + ++suballocItem) + { + VmaSuballocation& suballoc = *suballocItem; + if(suballoc.hAllocation == alloc) + { + iter_type nextItem = suballocItem; + ++nextItem; + + // Should have been ensured on higher level. + VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0); + + // Shrinking. + if(newSize < alloc->GetSize()) + { + const VkDeviceSize sizeDiff = suballoc.size - newSize; + + // There is next item. + if(nextItem != m_Suballocations.end()) + { + // Next item is free. + if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + // Grow this next item backward. + UnregisterFreeSuballocation(nextItem); + nextItem->offset -= sizeDiff; + nextItem->size += sizeDiff; + RegisterFreeSuballocation(nextItem); + } + // Next item is not free. + else + { + // Create free item after current one. 
+ VmaSuballocation newFreeSuballoc; + newFreeSuballoc.hAllocation = VK_NULL_HANDLE; + newFreeSuballoc.offset = suballoc.offset + newSize; + newFreeSuballoc.size = sizeDiff; + newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc); + RegisterFreeSuballocation(newFreeSuballocIt); + + ++m_FreeCount; + } + } + // This is the last item. + else + { + // Create free item at the end. + VmaSuballocation newFreeSuballoc; + newFreeSuballoc.hAllocation = VK_NULL_HANDLE; + newFreeSuballoc.offset = suballoc.offset + newSize; + newFreeSuballoc.size = sizeDiff; + newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + m_Suballocations.push_back(newFreeSuballoc); + + iter_type newFreeSuballocIt = m_Suballocations.end(); + RegisterFreeSuballocation(--newFreeSuballocIt); + + ++m_FreeCount; + } + + suballoc.size = newSize; + m_SumFreeSize += sizeDiff; + } + // Growing. + else + { + const VkDeviceSize sizeDiff = newSize - suballoc.size; + + // There is next item. + if(nextItem != m_Suballocations.end()) + { + // Next item is free. + if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + // There is not enough free space, including margin. + if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN) + { + return false; + } + + // There is more free space than required. + if(nextItem->size > sizeDiff) + { + // Move and shrink this next item. + UnregisterFreeSuballocation(nextItem); + nextItem->offset += sizeDiff; + nextItem->size -= sizeDiff; + RegisterFreeSuballocation(nextItem); + } + // There is exactly the amount of free space required. + else + { + // Remove this next free item. + UnregisterFreeSuballocation(nextItem); + m_Suballocations.erase(nextItem); + --m_FreeCount; + } + } + // Next item is not free - there is no space to grow. + else + { + return false; + } + } + // This is the last item - there is no space to grow. 
+ else + { + return false; + } + + suballoc.size = newSize; + m_SumFreeSize -= sizeDiff; + } + + // We cannot call Validate() here because alloc object is updated to new size outside of this call. + return true; + } + } + VMA_ASSERT(0 && "Not found!"); + return false; +} + bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const { VkDeviceSize lastSize = 0; @@ -11368,7 +11541,7 @@ VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex) // Write header. fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording"); - fprintf(m_File, "%s\n", "1,3"); + fprintf(m_File, "%s\n", "1,4"); return VK_SUCCESS; } @@ -11524,6 +11697,20 @@ void VmaRecorder::RecordFreeMemory(uint32_t frameIndex, Flush(); } +void VmaRecorder::RecordResizeAllocation( + uint32_t frameIndex, + VmaAllocation allocation, + VkDeviceSize newSize) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex, + allocation, newSize); + Flush(); +} + void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex, VmaAllocation allocation, const void* pUserData) @@ -12487,6 +12674,40 @@ void VmaAllocator_T::FreeMemory(const VmaAllocation allocation) vma_delete(this, allocation); } +VkResult VmaAllocator_T::ResizeAllocation( + const VmaAllocation alloc, + VkDeviceSize newSize) +{ + if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(newSize == alloc->GetSize()) + { + return VK_SUCCESS; + } + + switch(alloc->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + return VK_ERROR_FEATURE_NOT_PRESENT; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize)) + { + alloc->ChangeSize(newSize); + VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate()); + return VK_SUCCESS; 
+ } + else + { + return VK_ERROR_OUT_OF_POOL_MEMORY; + } + default: + VMA_ASSERT(0); + return VK_ERROR_VALIDATION_FAILED_EXT; + } +} + void VmaAllocator_T::CalculateStats(VmaStats* pStats) { // Initialize. @@ -13889,6 +14110,30 @@ void vmaFreeMemory( allocator->FreeMemory(allocation); } +VkResult vmaResizeAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize newSize) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaResizeAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordResizeAllocation( + allocator->GetCurrentFrameIndex(), + allocation, + newSize); + } +#endif + + return allocator->ResizeAllocation(allocation, newSize); +} + void vmaGetAllocationInfo( VmaAllocator allocator, VmaAllocation allocation, From 7f97202addbf9ce6d94a07b4338ca0ce11804a7e Mon Sep 17 00:00:00 2001 From: Adam Sawicki Date: Fri, 16 Nov 2018 13:43:34 +0100 Subject: [PATCH 12/15] Updated documentation of memory mapping with known bug in MoltenVK, based on #47. Thanks @DiegoAce ! --- docs/html/memory_mapping.html | 1 + docs/html/vk__mem__alloc_8h_source.html | 260 ++++++++++++------------ src/vk_mem_alloc.h | 4 +- 3 files changed, 134 insertions(+), 131 deletions(-) diff --git a/docs/html/memory_mapping.html b/docs/html/memory_mapping.html index e464c14..c584e46 100644 --- a/docs/html/memory_mapping.html +++ b/docs/html/memory_mapping.html @@ -82,6 +82,7 @@ Persistently mapped memory
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = sizeof(ConstantBuffer);
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    VkBuffer buf;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
    // Buffer is already mapped. You can access its memory.
    memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));

    There are some exceptions though, when you should consider mapping memory only for a short period of time:

    • When operating system is Windows 7 or 8.x (Windows 10 is not affected because it uses WDDM2), device is discrete AMD GPU, and memory type is the special 256 MiB pool of DEVICE_LOCAL + HOST_VISIBLE memory (selected when you use VMA_MEMORY_USAGE_CPU_TO_GPU), then whenever a memory block allocated from this memory type stays mapped for the time of any call to vkQueueSubmit() or vkQueuePresentKHR(), this block is migrated by WDDM to system RAM, which degrades performance. It doesn't matter if that particular memory block is actually used by the command buffer being submitted.
    • On Mac/MoltenVK there is a known bug - Issue #175 which requires unmapping before GPU can see updated texture.
    • Keeping many large memory blocks mapped may impact performance or stability of some debugging tools.

    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 3e42ce3..cc767f1 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,189 +65,189 @@ $(function() {
    vk_mem_alloc.h

    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1477 /*
    1478 Define this macro to 0/1 to disable/enable support for recording functionality,
    1479 available through VmaAllocatorCreateInfo::pRecordSettings.
    1480 */
    1481 #ifndef VMA_RECORDING_ENABLED
    1482  #ifdef _WIN32
    1483  #define VMA_RECORDING_ENABLED 1
    1484  #else
    1485  #define VMA_RECORDING_ENABLED 0
    1486  #endif
    1487 #endif
    1488 
    1489 #ifndef NOMINMAX
    1490  #define NOMINMAX // For windows.h
    1491 #endif
    1492 
    1493 #include <vulkan/vulkan.h>
    1494 
    1495 #if VMA_RECORDING_ENABLED
    1496  #include <windows.h>
    1497 #endif
    1498 
    1499 #if !defined(VMA_DEDICATED_ALLOCATION)
    1500  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1501  #define VMA_DEDICATED_ALLOCATION 1
    1502  #else
    1503  #define VMA_DEDICATED_ALLOCATION 0
    1504  #endif
    1505 #endif
    1506 
    1516 VK_DEFINE_HANDLE(VmaAllocator)
    1517 
    1518 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1520  VmaAllocator allocator,
    1521  uint32_t memoryType,
    1522  VkDeviceMemory memory,
    1523  VkDeviceSize size);
    1525 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1526  VmaAllocator allocator,
    1527  uint32_t memoryType,
    1528  VkDeviceMemory memory,
    1529  VkDeviceSize size);
    1530 
    1544 
    1574 
    1577 typedef VkFlags VmaAllocatorCreateFlags;
    1578 
    1583 typedef struct VmaVulkanFunctions {
    1584  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1585  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1586  PFN_vkAllocateMemory vkAllocateMemory;
    1587  PFN_vkFreeMemory vkFreeMemory;
    1588  PFN_vkMapMemory vkMapMemory;
    1589  PFN_vkUnmapMemory vkUnmapMemory;
    1590  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1591  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1592  PFN_vkBindBufferMemory vkBindBufferMemory;
    1593  PFN_vkBindImageMemory vkBindImageMemory;
    1594  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1595  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1596  PFN_vkCreateBuffer vkCreateBuffer;
    1597  PFN_vkDestroyBuffer vkDestroyBuffer;
    1598  PFN_vkCreateImage vkCreateImage;
    1599  PFN_vkDestroyImage vkDestroyImage;
    1600 #if VMA_DEDICATED_ALLOCATION
    1601  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1602  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1603 #endif
    1605 
    1607 typedef enum VmaRecordFlagBits {
    1614 
    1617 typedef VkFlags VmaRecordFlags;
    1618 
    1620 typedef struct VmaRecordSettings
    1621 {
    1631  const char* pFilePath;
    1633 
    1636 {
    1640 
    1641  VkPhysicalDevice physicalDevice;
    1643 
    1644  VkDevice device;
    1646 
    1649 
    1650  const VkAllocationCallbacks* pAllocationCallbacks;
    1652 
    1691  const VkDeviceSize* pHeapSizeLimit;
    1712 
    1714 VkResult vmaCreateAllocator(
    1715  const VmaAllocatorCreateInfo* pCreateInfo,
    1716  VmaAllocator* pAllocator);
    1717 
    1719 void vmaDestroyAllocator(
    1720  VmaAllocator allocator);
    1721 
    1727  VmaAllocator allocator,
    1728  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1729 
    1735  VmaAllocator allocator,
    1736  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1737 
    1745  VmaAllocator allocator,
    1746  uint32_t memoryTypeIndex,
    1747  VkMemoryPropertyFlags* pFlags);
    1748 
    1758  VmaAllocator allocator,
    1759  uint32_t frameIndex);
    1760 
    1763 typedef struct VmaStatInfo
    1764 {
    1766  uint32_t blockCount;
    1772  VkDeviceSize usedBytes;
    1774  VkDeviceSize unusedBytes;
    1777 } VmaStatInfo;
    1778 
    1780 typedef struct VmaStats
    1781 {
    1782  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1783  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1785 } VmaStats;
    1786 
    1788 void vmaCalculateStats(
    1789  VmaAllocator allocator,
    1790  VmaStats* pStats);
    1791 
    1792 #define VMA_STATS_STRING_ENABLED 1
    1793 
    1794 #if VMA_STATS_STRING_ENABLED
    1795 
    1797 
    1799 void vmaBuildStatsString(
    1800  VmaAllocator allocator,
    1801  char** ppStatsString,
    1802  VkBool32 detailedMap);
    1803 
    1804 void vmaFreeStatsString(
    1805  VmaAllocator allocator,
    1806  char* pStatsString);
    1807 
    1808 #endif // #if VMA_STATS_STRING_ENABLED
    1809 
    1818 VK_DEFINE_HANDLE(VmaPool)
    1819 
    1820 typedef enum VmaMemoryUsage
    1821 {
    1870 } VmaMemoryUsage;
    1871 
    1886 
    1941 
    1954 
    1964 
    1971 
    1975 
    1977 {
    1990  VkMemoryPropertyFlags requiredFlags;
    1995  VkMemoryPropertyFlags preferredFlags;
    2003  uint32_t memoryTypeBits;
    2016  void* pUserData;
    2018 
    2035 VkResult vmaFindMemoryTypeIndex(
    2036  VmaAllocator allocator,
    2037  uint32_t memoryTypeBits,
    2038  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2039  uint32_t* pMemoryTypeIndex);
    2040 
    2054  VmaAllocator allocator,
    2055  const VkBufferCreateInfo* pBufferCreateInfo,
    2056  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2057  uint32_t* pMemoryTypeIndex);
    2058 
    2072  VmaAllocator allocator,
    2073  const VkImageCreateInfo* pImageCreateInfo,
    2074  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2075  uint32_t* pMemoryTypeIndex);
    2076 
    2097 
    2114 
    2125 
    2131 
    2134 typedef VkFlags VmaPoolCreateFlags;
    2135 
    2138 typedef struct VmaPoolCreateInfo {
    2153  VkDeviceSize blockSize;
    2182 
    2185 typedef struct VmaPoolStats {
    2188  VkDeviceSize size;
    2191  VkDeviceSize unusedSize;
    2204  VkDeviceSize unusedRangeSizeMax;
    2207  size_t blockCount;
    2208 } VmaPoolStats;
    2209 
    2216 VkResult vmaCreatePool(
    2217  VmaAllocator allocator,
    2218  const VmaPoolCreateInfo* pCreateInfo,
    2219  VmaPool* pPool);
    2220 
    2223 void vmaDestroyPool(
    2224  VmaAllocator allocator,
    2225  VmaPool pool);
    2226 
    2233 void vmaGetPoolStats(
    2234  VmaAllocator allocator,
    2235  VmaPool pool,
    2236  VmaPoolStats* pPoolStats);
    2237 
    2245  VmaAllocator allocator,
    2246  VmaPool pool,
    2247  size_t* pLostAllocationCount);
    2248 
    2263 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2264 
    2289 VK_DEFINE_HANDLE(VmaAllocation)
    2290 
    2291 
    2293 typedef struct VmaAllocationInfo {
    2298  uint32_t memoryType;
    2307  VkDeviceMemory deviceMemory;
    2312  VkDeviceSize offset;
    2317  VkDeviceSize size;
    2331  void* pUserData;
    2333 
    2344 VkResult vmaAllocateMemory(
    2345  VmaAllocator allocator,
    2346  const VkMemoryRequirements* pVkMemoryRequirements,
    2347  const VmaAllocationCreateInfo* pCreateInfo,
    2348  VmaAllocation* pAllocation,
    2349  VmaAllocationInfo* pAllocationInfo);
    2350 
    2358  VmaAllocator allocator,
    2359  VkBuffer buffer,
    2360  const VmaAllocationCreateInfo* pCreateInfo,
    2361  VmaAllocation* pAllocation,
    2362  VmaAllocationInfo* pAllocationInfo);
    2363 
    2365 VkResult vmaAllocateMemoryForImage(
    2366  VmaAllocator allocator,
    2367  VkImage image,
    2368  const VmaAllocationCreateInfo* pCreateInfo,
    2369  VmaAllocation* pAllocation,
    2370  VmaAllocationInfo* pAllocationInfo);
    2371 
    2373 void vmaFreeMemory(
    2374  VmaAllocator allocator,
    2375  VmaAllocation allocation);
    2376 
    2397 VkResult vmaResizeAllocation(
    2398  VmaAllocator allocator,
    2399  VmaAllocation allocation,
    2400  VkDeviceSize newSize);
    2401 
    2419  VmaAllocator allocator,
    2420  VmaAllocation allocation,
    2421  VmaAllocationInfo* pAllocationInfo);
    2422 
    2437 VkBool32 vmaTouchAllocation(
    2438  VmaAllocator allocator,
    2439  VmaAllocation allocation);
    2440 
    2455  VmaAllocator allocator,
    2456  VmaAllocation allocation,
    2457  void* pUserData);
    2458 
    2470  VmaAllocator allocator,
    2471  VmaAllocation* pAllocation);
    2472 
    2507 VkResult vmaMapMemory(
    2508  VmaAllocator allocator,
    2509  VmaAllocation allocation,
    2510  void** ppData);
    2511 
    2516 void vmaUnmapMemory(
    2517  VmaAllocator allocator,
    2518  VmaAllocation allocation);
    2519 
    2532 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2533 
    2546 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2547 
    2564 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2565 
    2567 typedef struct VmaDefragmentationInfo {
    2572  VkDeviceSize maxBytesToMove;
    2579 
    2581 typedef struct VmaDefragmentationStats {
    2583  VkDeviceSize bytesMoved;
    2585  VkDeviceSize bytesFreed;
    2591 
    2630 VkResult vmaDefragment(
    2631  VmaAllocator allocator,
    2632  VmaAllocation* pAllocations,
    2633  size_t allocationCount,
    2634  VkBool32* pAllocationsChanged,
    2635  const VmaDefragmentationInfo *pDefragmentationInfo,
    2636  VmaDefragmentationStats* pDefragmentationStats);
    2637 
    2650 VkResult vmaBindBufferMemory(
    2651  VmaAllocator allocator,
    2652  VmaAllocation allocation,
    2653  VkBuffer buffer);
    2654 
    2667 VkResult vmaBindImageMemory(
    2668  VmaAllocator allocator,
    2669  VmaAllocation allocation,
    2670  VkImage image);
    2671 
    2698 VkResult vmaCreateBuffer(
    2699  VmaAllocator allocator,
    2700  const VkBufferCreateInfo* pBufferCreateInfo,
    2701  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2702  VkBuffer* pBuffer,
    2703  VmaAllocation* pAllocation,
    2704  VmaAllocationInfo* pAllocationInfo);
    2705 
    2717 void vmaDestroyBuffer(
    2718  VmaAllocator allocator,
    2719  VkBuffer buffer,
    2720  VmaAllocation allocation);
    2721 
    2723 VkResult vmaCreateImage(
    2724  VmaAllocator allocator,
    2725  const VkImageCreateInfo* pImageCreateInfo,
    2726  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2727  VkImage* pImage,
    2728  VmaAllocation* pAllocation,
    2729  VmaAllocationInfo* pAllocationInfo);
    2730 
    2742 void vmaDestroyImage(
    2743  VmaAllocator allocator,
    2744  VkImage image,
    2745  VmaAllocation allocation);
    2746 
    2747 #ifdef __cplusplus
    2748 }
    2749 #endif
    2750 
    2751 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2752 
    2753 // For Visual Studio IntelliSense.
    2754 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2755 #define VMA_IMPLEMENTATION
    2756 #endif
    2757 
    2758 #ifdef VMA_IMPLEMENTATION
    2759 #undef VMA_IMPLEMENTATION
    2760 
    2761 #include <cstdint>
    2762 #include <cstdlib>
    2763 #include <cstring>
    2764 
    2765 /*******************************************************************************
    2766 CONFIGURATION SECTION
    2767 
    2768 Define some of these macros before each #include of this header or change them
    2769 here if you need other then default behavior depending on your environment.
    2770 */
    2771 
    2772 /*
    2773 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2774 internally, like:
    2775 
    2776  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2777 
    2778 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2779 VmaAllocatorCreateInfo::pVulkanFunctions.
    2780 */
    2781 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2782 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2783 #endif
    2784 
    2785 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2786 //#define VMA_USE_STL_CONTAINERS 1
    2787 
    2788 /* Set this macro to 1 to make the library including and using STL containers:
    2789 std::pair, std::vector, std::list, std::unordered_map.
    2790 
    2791 Set it to 0 or undefined to make the library using its own implementation of
    2792 the containers.
    2793 */
    2794 #if VMA_USE_STL_CONTAINERS
    2795  #define VMA_USE_STL_VECTOR 1
    2796  #define VMA_USE_STL_UNORDERED_MAP 1
    2797  #define VMA_USE_STL_LIST 1
    2798 #endif
    2799 
    2800 #if VMA_USE_STL_VECTOR
    2801  #include <vector>
    2802 #endif
    2803 
    2804 #if VMA_USE_STL_UNORDERED_MAP
    2805  #include <unordered_map>
    2806 #endif
    2807 
    2808 #if VMA_USE_STL_LIST
    2809  #include <list>
    2810 #endif
    2811 
    2812 /*
    2813 Following headers are used in this CONFIGURATION section only, so feel free to
    2814 remove them if not needed.
    2815 */
    2816 #include <cassert> // for assert
    2817 #include <algorithm> // for min, max
    2818 #include <mutex> // for std::mutex
    2819 #include <atomic> // for std::atomic
    2820 
    2821 #ifndef VMA_NULL
    2822  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2823  #define VMA_NULL nullptr
    2824 #endif
    2825 
    2826 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2827 #include <cstdlib>
// Emulation of C11 aligned_alloc() for old Android NDK (API level < 16),
// where posix_memalign() is unavailable; implemented on top of memalign().
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
    2838 #elif defined(__APPLE__) || defined(__ANDROID__)
    2839 #include <cstdlib>
// Emulation of C11 aligned_alloc() for Apple and Android platforms where it
// is not provided by the C library; implemented via posix_memalign().
// Returns VMA_NULL on allocation failure.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
    2853 #endif
    2854 
    2855 // If your compiler is not compatible with C++11 and definition of
    2856 // aligned_alloc() function is missing, uncommeting following line may help:
    2857 
    2858 //#include <malloc.h>
    2859 
    2860 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2861 #ifndef VMA_ASSERT
    2862  #ifdef _DEBUG
    2863  #define VMA_ASSERT(expr) assert(expr)
    2864  #else
    2865  #define VMA_ASSERT(expr)
    2866  #endif
    2867 #endif
    2868 
    2869 // Assert that will be called very often, like inside data structures e.g. operator[].
    2870 // Making it non-empty can make program slow.
    2871 #ifndef VMA_HEAVY_ASSERT
    2872  #ifdef _DEBUG
    2873  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2874  #else
    2875  #define VMA_HEAVY_ASSERT(expr)
    2876  #endif
    2877 #endif
    2878 
    2879 #ifndef VMA_ALIGN_OF
    2880  #define VMA_ALIGN_OF(type) (__alignof(type))
    2881 #endif
    2882 
    2883 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2884  #if defined(_WIN32)
    2885  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2886  #else
    2887  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2888  #endif
    2889 #endif
    2890 
    2891 #ifndef VMA_SYSTEM_FREE
    2892  #if defined(_WIN32)
    2893  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2894  #else
    2895  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2896  #endif
    2897 #endif
    2898 
    2899 #ifndef VMA_MIN
    2900  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2901 #endif
    2902 
    2903 #ifndef VMA_MAX
    2904  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2905 #endif
    2906 
    2907 #ifndef VMA_SWAP
    2908  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2909 #endif
    2910 
    2911 #ifndef VMA_SORT
    2912  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2913 #endif
    2914 
    2915 #ifndef VMA_DEBUG_LOG
    2916  #define VMA_DEBUG_LOG(format, ...)
    2917  /*
    2918  #define VMA_DEBUG_LOG(format, ...) do { \
    2919  printf(format, __VA_ARGS__); \
    2920  printf("\n"); \
    2921  } while(false)
    2922  */
    2923 #endif
    2924 
    2925 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2926 #if VMA_STATS_STRING_ENABLED
    // Formats (num) as decimal text into outStr (buffer capacity strLen).
    // Used only when building statistics strings.
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        const unsigned int value = static_cast<unsigned int>(num);
        snprintf(outStr, strLen, "%u", value);
    }
    // Formats (num) as decimal text into outStr (buffer capacity strLen).
    // Used only when building statistics strings.
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        const unsigned long long value = static_cast<unsigned long long>(num);
        snprintf(outStr, strLen, "%llu", value);
    }
    // Formats pointer (ptr) into outStr (buffer capacity strLen) using the
    // platform's "%p" representation. Used only when building statistics strings.
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
    2939 #endif
    2940 
    2941 #ifndef VMA_MUTEX
    // Default mutex type: a thin wrapper over std::mutex exposing the
    // Lock()/Unlock() interface the library expects. Users can supply their
    // own implementation by defining VMA_MUTEX before including this header.
    class VmaMutex
    {
    public:
        VmaMutex() { }
        ~VmaMutex() { }
        void Lock() { m_Mtx.lock(); }
        void Unlock() { m_Mtx.unlock(); }
    private:
        std::mutex m_Mtx;
    };
    2952  #define VMA_MUTEX VmaMutex
    2953 #endif
    2954 
    2955 /*
    2956 If providing your own implementation, you need to implement a subset of std::atomic:
    2957 
    2958 - Constructor(uint32_t desired)
    2959 - uint32_t load() const
    2960 - void store(uint32_t desired)
    2961 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2962 */
    2963 #ifndef VMA_ATOMIC_UINT32
    2964  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2965 #endif
    2966 
    2967 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2968 
    2972  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2973 #endif
    2974 
    2975 #ifndef VMA_DEBUG_ALIGNMENT
    2976 
    2980  #define VMA_DEBUG_ALIGNMENT (1)
    2981 #endif
    2982 
    2983 #ifndef VMA_DEBUG_MARGIN
    2984 
    2988  #define VMA_DEBUG_MARGIN (0)
    2989 #endif
    2990 
    2991 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2992 
    2996  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2997 #endif
    2998 
    2999 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3000 
    3005  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3006 #endif
    3007 
    3008 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3009 
    3013  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3014 #endif
    3015 
    3016 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3017 
    3021  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3022 #endif
    3023 
    3024 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3025  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3027 #endif
    3028 
    3029 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3030  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3032 #endif
    3033 
    3034 #ifndef VMA_CLASS_NO_COPY
    3035  #define VMA_CLASS_NO_COPY(className) \
    3036  private: \
    3037  className(const className&) = delete; \
    3038  className& operator=(const className&) = delete;
    3039 #endif
    3040 
    3041 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3042 
    3043 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3044 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3045 
    3046 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3047 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3048 
    3049 /*******************************************************************************
    3050 END OF CONFIGURATION
    3051 */
    3052 
// VkAllocationCallbacks structure with every member set to null.
// NOTE(review): presumably substituted where the user provided no CPU
// allocation callbacks, so VmaMalloc/VmaFree fall back to the system
// allocator — confirm against usage later in the file.
static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3055 
    3056 // Returns number of bits set to 1 in (v).
    3057 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3058 {
    3059  uint32_t c = v - ((v >> 1) & 0x55555555);
    3060  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3061  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3062  c = ((c >> 8) + c) & 0x00FF00FF;
    3063  c = ((c >> 16) + c) & 0x0000FFFF;
    3064  return c;
    3065 }
    3066 
    3067 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3068 // Use types like uint32_t, uint64_t as T.
    3069 template <typename T>
    3070 static inline T VmaAlignUp(T val, T align)
    3071 {
    3072  return (val + align - 1) / align * align;
    3073 }
    3074 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3075 // Use types like uint32_t, uint64_t as T.
    3076 template <typename T>
    3077 static inline T VmaAlignDown(T val, T align)
    3078 {
    3079  return val / align * align;
    3080 }
    3081 
    3082 // Division with mathematical rounding to nearest number.
    3083 template <typename T>
    3084 static inline T VmaRoundDiv(T x, T y)
    3085 {
    3086  return (x + (y / (T)2)) / y;
    3087 }
    3088 
    3089 /*
    3090 Returns true if given number is a power of two.
    3091 T must be unsigned integer number or signed integer but always nonnegative.
    3092 For 0 returns true.
    3093 */
    3094 template <typename T>
    3095 inline bool VmaIsPow2(T x)
    3096 {
    3097  return (x & (x-1)) == 0;
    3098 }
    3099 
    3100 // Returns smallest power of 2 greater or equal to v.
    3101 static inline uint32_t VmaNextPow2(uint32_t v)
    3102 {
    3103  v--;
    3104  v |= v >> 1;
    3105  v |= v >> 2;
    3106  v |= v >> 4;
    3107  v |= v >> 8;
    3108  v |= v >> 16;
    3109  v++;
    3110  return v;
    3111 }
    3112 static inline uint64_t VmaNextPow2(uint64_t v)
    3113 {
    3114  v--;
    3115  v |= v >> 1;
    3116  v |= v >> 2;
    3117  v |= v >> 4;
    3118  v |= v >> 8;
    3119  v |= v >> 16;
    3120  v |= v >> 32;
    3121  v++;
    3122  return v;
    3123 }
    3124 
    3125 // Returns largest power of 2 less or equal to v.
    3126 static inline uint32_t VmaPrevPow2(uint32_t v)
    3127 {
    3128  v |= v >> 1;
    3129  v |= v >> 2;
    3130  v |= v >> 4;
    3131  v |= v >> 8;
    3132  v |= v >> 16;
    3133  v = v ^ (v >> 1);
    3134  return v;
    3135 }
    3136 static inline uint64_t VmaPrevPow2(uint64_t v)
    3137 {
    3138  v |= v >> 1;
    3139  v |= v >> 2;
    3140  v |= v >> 4;
    3141  v |= v >> 8;
    3142  v |= v >> 16;
    3143  v |= v >> 32;
    3144  v = v ^ (v >> 1);
    3145  return v;
    3146 }
    3147 
    3148 static inline bool VmaStrIsEmpty(const char* pStr)
    3149 {
    3150  return pStr == VMA_NULL || *pStr == '\0';
    3151 }
    3152 
    3153 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3154 {
    3155  switch(algorithm)
    3156  {
    3158  return "Linear";
    3160  return "Buddy";
    3161  case 0:
    3162  return "Default";
    3163  default:
    3164  VMA_ASSERT(0);
    3165  return "";
    3166  }
    3167 }
    3168 
    3169 #ifndef VMA_SORT
    3170 
// Lomuto-style partition step used by VmaQuickSort below.
// Partitions [beg, end) around a pivot chosen as the last element: after the
// call, every element for which cmp(elem, pivot) is true precedes the returned
// iterator, and the pivot itself sits at the returned position.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue; // Pivot: last element of the range.
    Iterator insertIndex = beg;                // Destination slot for the next "less than pivot" element.
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    // Move the pivot into its final, sorted position.
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}
    3193 
    3194 template<typename Iterator, typename Compare>
    3195 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3196 {
    3197  if(beg < end)
    3198  {
    3199  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3200  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3201  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3202  }
    3203 }
    3204 
    3205 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3206 
    3207 #endif // #ifndef VMA_SORT
    3208 
    3209 /*
    3210 Returns true if two memory blocks occupy overlapping pages.
    3211 ResourceA must be in less memory offset than ResourceB.
    3212 
    3213 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3214 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3215 */
    3216 static inline bool VmaBlocksOnSamePage(
    3217  VkDeviceSize resourceAOffset,
    3218  VkDeviceSize resourceASize,
    3219  VkDeviceSize resourceBOffset,
    3220  VkDeviceSize pageSize)
    3221 {
    3222  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3223  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3224  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3225  VkDeviceSize resourceBStart = resourceBOffset;
    3226  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3227  return resourceAEndPage == resourceBStartPage;
    3228 }
    3229 
// Kind of content stored in a suballocation. Consulted by
// VmaIsBufferImageGranularityConflict() to decide whether two neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,          // Unused region of a memory block.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,       // Content unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, // Image, tiling unknown.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3240 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Canonicalize the pair so suballocType1 <= suballocType2; the switch
    // below then covers each unordered pair exactly once.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown content: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Optimal vs optimal images do not conflict.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
    3281 
    3282 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3283 {
    3284  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3285  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3286  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3287  {
    3288  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3289  }
    3290 }
    3291 
    3292 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3293 {
    3294  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3295  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3296  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3297  {
    3298  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3299  {
    3300  return false;
    3301  }
    3302  }
    3303  return true;
    3304 }
    3305 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
// When useMutex is false the lock becomes a no-op, which lets callers support
// a single-threaded configuration without branching at every call site.
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    VMA_MUTEX* m_pMutex; // Null when locking is disabled.
};
    3331 
    3332 #if VMA_DEBUG_GLOBAL_MUTEX
    3333  static VMA_MUTEX gDebugGlobalMutex;
    3334  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3335 #else
    3336  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3337 #endif
    3338 
    3339 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3340 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3341 
    3342 /*
    3343 Performs binary search and returns iterator to first element that is greater or
    3344 equal to (key), according to comparison (cmp).
    3345 
    3346 Cmp should return true if first argument is less than second argument.
    3347 
    3348 Returned value is the found element, if present in the collection or place where
    3349 new element with value (key) should be inserted.
    3350 */
    3351 template <typename CmpLess, typename IterT, typename KeyT>
    3352 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3353 {
    3354  size_t down = 0, up = (end - beg);
    3355  while(down < up)
    3356  {
    3357  const size_t mid = (down + up) / 2;
    3358  if(cmp(*(beg+mid), key))
    3359  {
    3360  down = mid + 1;
    3361  }
    3362  else
    3363  {
    3364  up = mid;
    3365  }
    3366  }
    3367  return beg + down;
    3368 }
    3369 
    3371 // Memory allocation
    3372 
    3373 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3374 {
    3375  if((pAllocationCallbacks != VMA_NULL) &&
    3376  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3377  {
    3378  return (*pAllocationCallbacks->pfnAllocation)(
    3379  pAllocationCallbacks->pUserData,
    3380  size,
    3381  alignment,
    3382  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3383  }
    3384  else
    3385  {
    3386  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3387  }
    3388 }
    3389 
    3390 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3391 {
    3392  if((pAllocationCallbacks != VMA_NULL) &&
    3393  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3394  {
    3395  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3396  }
    3397  else
    3398  {
    3399  VMA_SYSTEM_FREE(ptr);
    3400  }
    3401 }
    3402 
    3403 template<typename T>
    3404 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3405 {
    3406  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3407 }
    3408 
    3409 template<typename T>
    3410 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3411 {
    3412  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3413 }
    3414 
    3415 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3416 
    3417 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3418 
    3419 template<typename T>
    3420 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3421 {
    3422  ptr->~T();
    3423  VmaFree(pAllocationCallbacks, ptr);
    3424 }
    3425 
    3426 template<typename T>
    3427 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3428 {
    3429  if(ptr != VMA_NULL)
    3430  {
    3431  for(size_t i = count; i--; )
    3432  {
    3433  ptr[i].~T();
    3434  }
    3435  VmaFree(pAllocationCallbacks, ptr);
    3436  }
    3437 }
    3438 
// STL-compatible allocator.
// Routes all allocations through the user-provided VkAllocationCallbacks
// (or the system allocator when callbacks are null). Used as the allocator
// type for VmaVector and the library's STL-style containers.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor, required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Allocators compare equal iff they use the same callback set.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3466 
    3467 #if VMA_USE_STL_VECTOR
    3468 
    3469 #define VmaVector std::vector
    3470 
    3471 template<typename T, typename allocatorT>
    3472 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3473 {
    3474  vec.insert(vec.begin() + index, item);
    3475 }
    3476 
    3477 template<typename T, typename allocatorT>
    3478 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3479 {
    3480  vec.erase(vec.begin() + index);
    3481 }
    3482 
    3483 #else // #if VMA_USE_STL_VECTOR
    3484 
    3485 /* Class with interface compatible with subset of std::vector.
    3486 T must be POD because constructors and destructors are not called and memcpy is
    3487 used for these objects. */
    3488 template<typename T, typename AllocatorT>
    3489 class VmaVector
    3490 {
    3491 public:
    3492  typedef T value_type;
    3493 
    3494  VmaVector(const AllocatorT& allocator) :
    3495  m_Allocator(allocator),
    3496  m_pArray(VMA_NULL),
    3497  m_Count(0),
    3498  m_Capacity(0)
    3499  {
    3500  }
    3501 
    3502  VmaVector(size_t count, const AllocatorT& allocator) :
    3503  m_Allocator(allocator),
    3504  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3505  m_Count(count),
    3506  m_Capacity(count)
    3507  {
    3508  }
    3509 
    3510  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3511  m_Allocator(src.m_Allocator),
    3512  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3513  m_Count(src.m_Count),
    3514  m_Capacity(src.m_Count)
    3515  {
    3516  if(m_Count != 0)
    3517  {
    3518  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3519  }
    3520  }
    3521 
    3522  ~VmaVector()
    3523  {
    3524  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3525  }
    3526 
    3527  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3528  {
    3529  if(&rhs != this)
    3530  {
    3531  resize(rhs.m_Count);
    3532  if(m_Count != 0)
    3533  {
    3534  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3535  }
    3536  }
    3537  return *this;
    3538  }
    3539 
    3540  bool empty() const { return m_Count == 0; }
    3541  size_t size() const { return m_Count; }
    3542  T* data() { return m_pArray; }
    3543  const T* data() const { return m_pArray; }
    3544 
    3545  T& operator[](size_t index)
    3546  {
    3547  VMA_HEAVY_ASSERT(index < m_Count);
    3548  return m_pArray[index];
    3549  }
    3550  const T& operator[](size_t index) const
    3551  {
    3552  VMA_HEAVY_ASSERT(index < m_Count);
    3553  return m_pArray[index];
    3554  }
    3555 
    3556  T& front()
    3557  {
    3558  VMA_HEAVY_ASSERT(m_Count > 0);
    3559  return m_pArray[0];
    3560  }
    3561  const T& front() const
    3562  {
    3563  VMA_HEAVY_ASSERT(m_Count > 0);
    3564  return m_pArray[0];
    3565  }
    3566  T& back()
    3567  {
    3568  VMA_HEAVY_ASSERT(m_Count > 0);
    3569  return m_pArray[m_Count - 1];
    3570  }
    3571  const T& back() const
    3572  {
    3573  VMA_HEAVY_ASSERT(m_Count > 0);
    3574  return m_pArray[m_Count - 1];
    3575  }
    3576 
    3577  void reserve(size_t newCapacity, bool freeMemory = false)
    3578  {
    3579  newCapacity = VMA_MAX(newCapacity, m_Count);
    3580 
    3581  if((newCapacity < m_Capacity) && !freeMemory)
    3582  {
    3583  newCapacity = m_Capacity;
    3584  }
    3585 
    3586  if(newCapacity != m_Capacity)
    3587  {
    3588  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3589  if(m_Count != 0)
    3590  {
    3591  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3592  }
    3593  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3594  m_Capacity = newCapacity;
    3595  m_pArray = newArray;
    3596  }
    3597  }
    3598 
    3599  void resize(size_t newCount, bool freeMemory = false)
    3600  {
    3601  size_t newCapacity = m_Capacity;
    3602  if(newCount > m_Capacity)
    3603  {
    3604  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3605  }
    3606  else if(freeMemory)
    3607  {
    3608  newCapacity = newCount;
    3609  }
    3610 
    3611  if(newCapacity != m_Capacity)
    3612  {
    3613  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3614  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3615  if(elementsToCopy != 0)
    3616  {
    3617  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3618  }
    3619  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3620  m_Capacity = newCapacity;
    3621  m_pArray = newArray;
    3622  }
    3623 
    3624  m_Count = newCount;
    3625  }
    3626 
    3627  void clear(bool freeMemory = false)
    3628  {
    3629  resize(0, freeMemory);
    3630  }
    3631 
    3632  void insert(size_t index, const T& src)
    3633  {
    3634  VMA_HEAVY_ASSERT(index <= m_Count);
    3635  const size_t oldCount = size();
    3636  resize(oldCount + 1);
    3637  if(index < oldCount)
    3638  {
    3639  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3640  }
    3641  m_pArray[index] = src;
    3642  }
    3643 
    3644  void remove(size_t index)
    3645  {
    3646  VMA_HEAVY_ASSERT(index < m_Count);
    3647  const size_t oldCount = size();
    3648  if(index < oldCount - 1)
    3649  {
    3650  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3651  }
    3652  resize(oldCount - 1);
    3653  }
    3654 
    3655  void push_back(const T& src)
    3656  {
    3657  const size_t newIndex = size();
    3658  resize(newIndex + 1);
    3659  m_pArray[newIndex] = src;
    3660  }
    3661 
    3662  void pop_back()
    3663  {
    3664  VMA_HEAVY_ASSERT(m_Count > 0);
    3665  resize(size() - 1);
    3666  }
    3667 
    3668  void push_front(const T& src)
    3669  {
    3670  insert(0, src);
    3671  }
    3672 
    3673  void pop_front()
    3674  {
    3675  VMA_HEAVY_ASSERT(m_Count > 0);
    3676  remove(0);
    3677  }
    3678 
    3679  typedef T* iterator;
    3680 
    3681  iterator begin() { return m_pArray; }
    3682  iterator end() { return m_pArray + m_Count; }
    3683 
    3684 private:
    3685  AllocatorT m_Allocator;
    3686  T* m_pArray;
    3687  size_t m_Count;
    3688  size_t m_Capacity;
    3689 };
    3690 
    3691 template<typename T, typename allocatorT>
    3692 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    3693 {
    3694  vec.insert(index, item);
    3695 }
    3696 
    3697 template<typename T, typename allocatorT>
    3698 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    3699 {
    3700  vec.remove(index);
    3701 }
    3702 
    3703 #endif // #if VMA_USE_STL_VECTOR
    3704 
    3705 template<typename CmpLess, typename VectorT>
    3706 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3707 {
    3708  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3709  vector.data(),
    3710  vector.data() + vector.size(),
    3711  value,
    3712  CmpLess()) - vector.data();
    3713  VmaVectorInsert(vector, indexToInsert, value);
    3714  return indexToInsert;
    3715 }
    3716 
    3717 template<typename CmpLess, typename VectorT>
    3718 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3719 {
    3720  CmpLess comparator;
    3721  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3722  vector.begin(),
    3723  vector.end(),
    3724  value,
    3725  comparator);
    3726  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3727  {
    3728  size_t indexToRemove = it - vector.begin();
    3729  VmaVectorRemove(vector, indexToRemove);
    3730  return true;
    3731  }
    3732  return false;
    3733 }
    3734 
    3735 template<typename CmpLess, typename IterT, typename KeyT>
    3736 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3737 {
    3738  CmpLess comparator;
    3739  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3740  beg, end, value, comparator);
    3741  if(it == end ||
    3742  (!comparator(*it, value) && !comparator(value, *it)))
    3743  {
    3744  return it;
    3745  }
    3746  return end;
    3747 }
    3748 
    3750 // class VmaPoolAllocator
    3751 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    // A slot either holds a live T or, while free, the index of the next
    // free slot in the same block (an intrusive free list).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3787 
// Stores the allocation callbacks and per-block capacity; no block is
// allocated until the first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3796 
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    // Releases every block; T destructors are NOT run (see class note).
    Clear();
}
    3802 
    3803 template<typename T>
    3804 void VmaPoolAllocator<T>::Clear()
    3805 {
    3806  for(size_t i = m_ItemBlocks.size(); i--; )
    3807  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3808  m_ItemBlocks.clear();
    3809 }
    3810 
    3811 template<typename T>
    3812 T* VmaPoolAllocator<T>::Alloc()
    3813 {
    3814  for(size_t i = m_ItemBlocks.size(); i--; )
    3815  {
    3816  ItemBlock& block = m_ItemBlocks[i];
    3817  // This block has some free items: Use first one.
    3818  if(block.FirstFreeIndex != UINT32_MAX)
    3819  {
    3820  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3821  block.FirstFreeIndex = pItem->NextFreeIndex;
    3822  return &pItem->Value;
    3823  }
    3824  }
    3825 
    3826  // No block has free item: Create new one and use it.
    3827  ItemBlock& newBlock = CreateNewBlock();
    3828  Item* const pItem = &newBlock.pItems[0];
    3829  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3830  return &pItem->Value;
    3831 }
    3832 
    3833 template<typename T>
    3834 void VmaPoolAllocator<T>::Free(T* ptr)
    3835 {
    3836  // Search all memory blocks to find ptr.
    3837  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3838  {
    3839  ItemBlock& block = m_ItemBlocks[i];
    3840 
    3841  // Casting to union.
    3842  Item* pItemPtr;
    3843  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3844 
    3845  // Check if pItemPtr is in address range of this block.
    3846  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3847  {
    3848  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3849  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3850  block.FirstFreeIndex = index;
    3851  return;
    3852  }
    3853  }
    3854  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3855 }
    3856 
    3857 template<typename T>
    3858 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3859 {
    3860  ItemBlock newBlock = {
    3861  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3862 
    3863  m_ItemBlocks.push_back(newBlock);
    3864 
    3865  // Setup singly-linked list of all free items in this block.
    3866  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3867  newBlock.pItems[i].NextFreeIndex = i + 1;
    3868  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3869  return m_ItemBlocks.back();
    3870 }
    3871 
    3873 // class VmaRawList, VmaList
    3874 
    3875 #if VMA_USE_STL_LIST
    3876 
    3877 #define VmaList std::list
    3878 
    3879 #else // #if VMA_USE_STL_LIST
    3880 
// Node of VmaRawList: doubly linked, stores the payload by value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // VMA_NULL for the front node.
    VmaListItem* pNext; // VMA_NULL for the back node.
    T Value;
};
    3888 
    3889 // Doubly linked list.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Returns all nodes to the item allocator and resets to empty state.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Parameterless overloads append a node with uninitialized Value and
    // return it; value overloads additionally copy-assign the payload.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool for list nodes.
    ItemType* m_pFront; // VMA_NULL when the list is empty.
    ItemType* m_pBack;  // VMA_NULL when the list is empty.
    size_t m_Count;
};
    3933 
// Constructs an empty list; nodes are pool-allocated in chunks of 128.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3943 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // m_ItemAllocator's own destructor releases the underlying blocks wholesale.
}
    3950 
    3951 template<typename T>
    3952 void VmaRawList<T>::Clear()
    3953 {
    3954  if(IsEmpty() == false)
    3955  {
    3956  ItemType* pItem = m_pBack;
    3957  while(pItem != VMA_NULL)
    3958  {
    3959  ItemType* const pPrevItem = pItem->pPrev;
    3960  m_ItemAllocator.Free(pItem);
    3961  pItem = pPrevItem;
    3962  }
    3963  m_pFront = VMA_NULL;
    3964  m_pBack = VMA_NULL;
    3965  m_Count = 0;
    3966  }
    3967 }
    3968 
    3969 template<typename T>
    3970 VmaListItem<T>* VmaRawList<T>::PushBack()
    3971 {
    3972  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3973  pNewItem->pNext = VMA_NULL;
    3974  if(IsEmpty())
    3975  {
    3976  pNewItem->pPrev = VMA_NULL;
    3977  m_pFront = pNewItem;
    3978  m_pBack = pNewItem;
    3979  m_Count = 1;
    3980  }
    3981  else
    3982  {
    3983  pNewItem->pPrev = m_pBack;
    3984  m_pBack->pNext = pNewItem;
    3985  m_pBack = pNewItem;
    3986  ++m_Count;
    3987  }
    3988  return pNewItem;
    3989 }
    3990 
    3991 template<typename T>
    3992 VmaListItem<T>* VmaRawList<T>::PushFront()
    3993 {
    3994  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3995  pNewItem->pPrev = VMA_NULL;
    3996  if(IsEmpty())
    3997  {
    3998  pNewItem->pNext = VMA_NULL;
    3999  m_pFront = pNewItem;
    4000  m_pBack = pNewItem;
    4001  m_Count = 1;
    4002  }
    4003  else
    4004  {
    4005  pNewItem->pNext = m_pFront;
    4006  m_pFront->pPrev = pNewItem;
    4007  m_pFront = pNewItem;
    4008  ++m_Count;
    4009  }
    4010  return pNewItem;
    4011 }
    4012 
    4013 template<typename T>
    4014 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4015 {
    4016  ItemType* const pNewItem = PushBack();
    4017  pNewItem->Value = value;
    4018  return pNewItem;
    4019 }
    4020 
    4021 template<typename T>
    4022 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4023 {
    4024  ItemType* const pNewItem = PushFront();
    4025  pNewItem->Value = value;
    4026  return pNewItem;
    4027 }
    4028 
    4029 template<typename T>
    4030 void VmaRawList<T>::PopBack()
    4031 {
    4032  VMA_HEAVY_ASSERT(m_Count > 0);
    4033  ItemType* const pBackItem = m_pBack;
    4034  ItemType* const pPrevItem = pBackItem->pPrev;
    4035  if(pPrevItem != VMA_NULL)
    4036  {
    4037  pPrevItem->pNext = VMA_NULL;
    4038  }
    4039  m_pBack = pPrevItem;
    4040  m_ItemAllocator.Free(pBackItem);
    4041  --m_Count;
    4042 }
    4043 
    4044 template<typename T>
    4045 void VmaRawList<T>::PopFront()
    4046 {
    4047  VMA_HEAVY_ASSERT(m_Count > 0);
    4048  ItemType* const pFrontItem = m_pFront;
    4049  ItemType* const pNextItem = pFrontItem->pNext;
    4050  if(pNextItem != VMA_NULL)
    4051  {
    4052  pNextItem->pPrev = VMA_NULL;
    4053  }
    4054  m_pFront = pNextItem;
    4055  m_ItemAllocator.Free(pFrontItem);
    4056  --m_Count;
    4057 }
    4058 
    4059 template<typename T>
    4060 void VmaRawList<T>::Remove(ItemType* pItem)
    4061 {
    4062  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4063  VMA_HEAVY_ASSERT(m_Count > 0);
    4064 
    4065  if(pItem->pPrev != VMA_NULL)
    4066  {
    4067  pItem->pPrev->pNext = pItem->pNext;
    4068  }
    4069  else
    4070  {
    4071  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4072  m_pFront = pItem->pNext;
    4073  }
    4074 
    4075  if(pItem->pNext != VMA_NULL)
    4076  {
    4077  pItem->pNext->pPrev = pItem->pPrev;
    4078  }
    4079  else
    4080  {
    4081  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4082  m_pBack = pItem->pPrev;
    4083  }
    4084 
    4085  m_ItemAllocator.Free(pItem);
    4086  --m_Count;
    4087 }
    4088 
    4089 template<typename T>
    4090 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4091 {
    4092  if(pItem != VMA_NULL)
    4093  {
    4094  ItemType* const prevItem = pItem->pPrev;
    4095  ItemType* const newItem = m_ItemAllocator.Alloc();
    4096  newItem->pPrev = prevItem;
    4097  newItem->pNext = pItem;
    4098  pItem->pPrev = newItem;
    4099  if(prevItem != VMA_NULL)
    4100  {
    4101  prevItem->pNext = newItem;
    4102  }
    4103  else
    4104  {
    4105  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4106  m_pFront = newItem;
    4107  }
    4108  ++m_Count;
    4109  return newItem;
    4110  }
    4111  else
    4112  return PushBack();
    4113 }
    4114 
    4115 template<typename T>
    4116 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4117 {
    4118  if(pItem != VMA_NULL)
    4119  {
    4120  ItemType* const nextItem = pItem->pNext;
    4121  ItemType* const newItem = m_ItemAllocator.Alloc();
    4122  newItem->pNext = nextItem;
    4123  newItem->pPrev = pItem;
    4124  pItem->pNext = newItem;
    4125  if(nextItem != VMA_NULL)
    4126  {
    4127  nextItem->pPrev = newItem;
    4128  }
    4129  else
    4130  {
    4131  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4132  m_pBack = newItem;
    4133  }
    4134  ++m_Count;
    4135  return newItem;
    4136  }
    4137  else
    4138  return PushFront();
    4139 }
    4140 
    4141 template<typename T>
    4142 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4143 {
    4144  ItemType* const newItem = InsertBefore(pItem);
    4145  newItem->Value = value;
    4146  return newItem;
    4147 }
    4148 
    4149 template<typename T>
    4150 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4151 {
    4152  ItemType* const newItem = InsertAfter(pItem);
    4153  newItem->Value = value;
    4154  return newItem;
    4155 }
    4156 
// std::list-like container built on VmaRawList, exposing a minimal subset of
// the standard interface (empty/size/begin/end/clear/push_back/erase/insert)
// with bidirectional iterators. AllocatorT must expose a m_pCallbacks member
// (const VkAllocationCallbacks*), as VmaStlAllocator does.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator. A null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() (null item) moves to the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            // Comparing iterators from different lists is invalid.
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing cend() (null item) moves to the last element.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before it (before end() == append) and returns an iterator
    // to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4341 
    4342 #endif // #if VMA_USE_STL_LIST
    4343 
    4345 // class VmaMap
    4346 
    4347 // Unused in this version.
    4348 #if 0
    4349 
    4350 #if VMA_USE_STL_UNORDERED_MAP
    4351 
    4352 #define VmaPair std::pair
    4353 
    4354 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4355  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4356 
    4357 #else // #if VMA_USE_STL_UNORDERED_MAP
    4358 
// POD replacement for std::pair; element type of VmaMap.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4368 
    4369 /* Class compatible with subset of interface of std::unordered_map.
    4370 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4371 */
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Raw pointer into the sorted vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted by key so that lookup can binary-search.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4391 
    4392 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4393 
// Orders VmaPairs by key. The second overload enables key-only comparison
// inside binary search without constructing a whole pair.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4406 
// Inserts pair at its key-sorted position in the underlying vector.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4417 
// Binary-searches the sorted vector for key; returns end() when absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4435 
// Removes the element pointed to by it from the sorted vector.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4441 
    4442 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4443 
    4444 #endif // #if 0
    4445 
    4447 
class VmaDeviceMemoryBlock;

// Direction of a mapped-memory cache operation (flush vs. invalidate).
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4451 
/*
Internal representation of a single allocation handed out by the library:
either a sub-range of a VmaDeviceMemoryBlock (ALLOCATION_TYPE_BLOCK) or a
dedicated VkDeviceMemory (ALLOCATION_TYPE_DEDICATED). The active member of the
union at the bottom is selected by m_Type.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit of m_MapCount set when the allocation is persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Creates an allocation in ALLOCATION_TYPE_NONE state; one of the Init*
    // methods below must be called before the object is used.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Only the persistent-map bit may remain; a nonzero reference count
        // means a vmaMapMemory() was not paired with vmaUnmapMemory().
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns this NONE allocation into a suballocation of the given block.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes as a block allocation that is already lost: no backing
    // block, m_LastUseFrameIndex must already equal VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK (asserted).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated
    // allocation. Valid only for ALLOCATION_TYPE_DEDICATED (asserted).
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // May be set only once (asserted); 0 means unknown.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4670 
    4671 /*
    4672 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4673 allocated memory block or free.
    4674 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset from the start of the VkDeviceMemory block.
    VkDeviceSize size;   // Size of this region in bytes.
    VmaAllocation hAllocation; // Owning allocation; presumably null for FREE regions -- TODO confirm against users.
    VmaSuballocationType type;
};
    4682 
    4683 // Comparator for offsets.
// Orders suballocations by ascending offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Orders suballocations by descending offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    4698 
// Offset-ordered list of the regions (used and free) of one memory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4703 
    4704 /*
    4705 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4706 
    4707 If canMakeOtherLost was false:
    4708 - item points to a FREE suballocation.
    4709 - itemsToMakeLostCount is 0.
    4710 
    4711 If canMakeOtherLost was true:
    4712 - item points to first of sequence of suballocations, which are either FREE,
    4713  or point to VmaAllocations that can become lost.
    4714 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4715  the requested allocation to succeed.
    4716 */
struct VmaAllocationRequest
{
    VkDeviceSize offset; // Proposed offset of the new allocation within the block.
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData;

    // Heuristic cost of satisfying this request: bytes of existing allocations
    // that must be sacrificed, plus a fixed penalty per lost allocation.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4731 
    4732 /*
    4733 Data structure used for bookkeeping of allocations and unused ranges of memory
    4734 in a single VkDeviceMemory block.
    4735 */
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    // Virtual destructor: instances are deleted through this base class pointer.
    virtual ~VmaBlockMetadata() { }
    // Always call after construction; derived classes extend this to set up
    // their own structures for a block of the given size.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    // Size of the largest contiguous free region in this block.
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations named in pAllocationRequest
    // (itemsToMakeLostCount of them); returns false if that is not possible.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Presumably returns the number of allocations made lost - TODO confirm
    // against the implementations, which are defined elsewhere in this file.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // pBlockData: pointer to this block's mapped memory, used to inspect
    // corruption-detection margins where the algorithm supports them.
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Base implementation refuses; only algorithms that support in-place
    // resize override this.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Shared helpers for emitting the JSON detailed map, used by derived
    // classes' PrintDetailedMap implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    // Total size of the memory block this metadata describes.
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4820 
// Used inside Validate() implementations: if cond is false, asserts with the
// failed condition text and makes the enclosing function return false.
// Wrapped in do { } while(false) so it acts as a single statement; only
// usable inside functions returning bool.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4825 
// Default block metadata algorithm: keeps all suballocations (used and free)
// in VmaSuballocationList (m_Suballocations), plus a by-size index of large
// free ranges for fast best/worst-fit search.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Used allocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // In-place resize is supported by this algorithm (overrides the base's
    // always-false default).
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    // Number of free suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all free suballocations; cached so GetSumFreeSize() is O(1).
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    // Checks invariants of m_FreeSuballocationsBySize (used by Validate()).
    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4918 
    4919 /*
    4920 Allocations and their references in internal data structure look like this:
    4921 
    4922 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4923 
    4924  0 +-------+
    4925  | |
    4926  | |
    4927  | |
    4928  +-------+
    4929  | Alloc | 1st[m_1stNullItemsBeginCount]
    4930  +-------+
    4931  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4932  +-------+
    4933  | ... |
    4934  +-------+
    4935  | Alloc | 1st[1st.size() - 1]
    4936  +-------+
    4937  | |
    4938  | |
    4939  | |
    4940 GetSize() +-------+
    4941 
    4942 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4943 
    4944  0 +-------+
    4945  | Alloc | 2nd[0]
    4946  +-------+
    4947  | Alloc | 2nd[1]
    4948  +-------+
    4949  | ... |
    4950  +-------+
    4951  | Alloc | 2nd[2nd.size() - 1]
    4952  +-------+
    4953  | |
    4954  | |
    4955  | |
    4956  +-------+
    4957  | Alloc | 1st[m_1stNullItemsBeginCount]
    4958  +-------+
    4959  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4960  +-------+
    4961  | ... |
    4962  +-------+
    4963  | Alloc | 1st[1st.size() - 1]
    4964  +-------+
    4965  | |
    4966 GetSize() +-------+
    4967 
    4968 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4969 
    4970  0 +-------+
    4971  | |
    4972  | |
    4973  | |
    4974  +-------+
    4975  | Alloc | 1st[m_1stNullItemsBeginCount]
    4976  +-------+
    4977  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4978  +-------+
    4979  | ... |
    4980  +-------+
    4981  | Alloc | 1st[1st.size() - 1]
    4982  +-------+
    4983  | |
    4984  | |
    4985  | |
    4986  +-------+
    4987  | Alloc | 2nd[2nd.size() - 1]
    4988  +-------+
    4989  | ... |
    4990  +-------+
    4991  | Alloc | 2nd[1]
    4992  +-------+
    4993  | Alloc | 2nd[0]
    4994 GetSize() +-------+
    4995 
    4996 */
// Linear / ring-buffer / double-stack block metadata algorithm.
// See the large diagram comment above for the layout in each m_2ndVectorMode.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    // Cached sum of free space; kept up to date by Alloc/Free.
    VkDeviceSize m_SumFreeSize;
    // Backing storage for the two ping-pong vectors; which one is "1st" is
    // selected by m_1stVectorIndex via the accessors below.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Heuristic deciding when the 1st vector is worth compacting
    // (removing its null items).
    bool ShouldCompact1st() const;
    // Housekeeping after Free/FreeAtOffset: compacts vectors, switches
    // modes/ping-pong roles as needed.
    void CleanupAfterFree();
};
    5095 
    5096 /*
    5097 - GetSize() is the original size of allocated memory block.
    5098 - m_UsableSize is this size aligned down to a power of two.
    5099  All allocations and calculations happen relative to m_UsableSize.
    5100 - GetUnusableSize() is the difference between them.
  It is reported as a separate, unused range, not available for allocations.
    5102 
    5103 Node at level 0 has size = m_UsableSize.
    5104 Each next level contains nodes with size 2 times smaller than current level.
    5105 m_LevelCount is the maximum number of levels to use in the current object.
    5106 */
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Unusable tail (between m_UsableSize and GetSize()) is reported as free.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty exactly when the root node is one big free range.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    // Both public frees funnel into the private FreeAtOffset(alloc, offset) overload.
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators used while recursively validating the node tree; compared
    // against the cached member counters at the end of Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the buddy tree. Payload in the union depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            // TYPE_FREE: links in the per-level free list.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // TYPE_SPLIT: right child is leftChild->buddy.
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Intrusive doubly linked list of free nodes, one per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves at each deeper level.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5243 
    5244 /*
    5245 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5246 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5247 
    5248 Thread-safety: This class must be externally synchronized.
    5249 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block; concrete type is
    // chosen at Init() time based on `algorithm`. Owned by this object.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        // Destroy() must have been called first to free m_hMemory.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null.
    // `count` increments the internal map reference count, so Map/Unmap
    // calls must be balanced.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5312 
    5313 struct VmaPointerLess
    5314 {
    5315  bool operator()(const void* lhs, const void* rhs) const
    5316  {
    5317  return lhs < rhs;
    5318  }
    5319 };
    5320 
class VmaDefragmentator;

/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Preallocates m_MinBlockCount blocks; returns first failure, if any.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates m_pDefragmentator; subsequent calls presumably return
    // the existing one - TODO confirm against the out-of-view definition.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    // maxBytesToMove / maxAllocationsToMove are in-out budgets,
    // decremented by the work actually performed.
    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    // Guards m_Blocks and related mutable state (class is internally synchronized).
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5441 
// Implementation behind the opaque VmaPool handle: a custom pool is simply
// one VmaBlockVector plus an identifier.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned only once (asserted); 0 means "not yet assigned".
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5464 
// Moves allocations between the blocks of one VmaBlockVector to compact
// memory. Collect candidates with AddAllocation(), then call Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals, exposed via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One candidate allocation; m_pChanged (optional, may be null) is set by
    // the defragmentation if the allocation was moved.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by allocation size, descending.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built during Defragment().
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Pessimistic default until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations iff it holds more allocations
        // than were registered as movable candidates.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE: "Descecnding" is a long-standing typo in this method name,
        // kept because it is called elsewhere in this file.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        // Maps the block for defragmentation if not already mapped;
        // *ppMappedData receives the pointer.
        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous "less" for binary search of BlockInfo* by underlying block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moving allocations within the given budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving. pChanged (optional)
    // is written during Defragment() to report whether it moved.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5594 
    5595 #if VMA_RECORDING_ENABLED
    5596 
// Records VMA API calls with their parameters to a FILE (see m_File),
// presumably for later offline replay — the file format is defined by the
// implementation elsewhere. One Record* method per public VMA entry point.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device/memory configuration.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Common per-call data written with every record.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Helper that renders user data either as the original string or as a
    // formatted pointer, depending on the allocation's flags.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];   // Buffer for the pointer rendered as text.
        const char* m_Str;   // Points into m_PtrStr or at the user's string.
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;   // Guards m_File when m_UseMutex is set.
    int64_t m_Freq;          // Timer frequency/start used to compute CallParams::time.
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5696 
    5697 #endif // #if VMA_RECORDING_ENABLED
    5698 
    5699 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools, one block vector per memory type.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Dedicated (non-pooled) allocations per memory type.
    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided callbacks, or null to use the default allocator.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Effective bufferImageGranularity, never below the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Thin wrappers over vkAllocateMemory/vkFreeMemory that also track heap limits.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5900 
    5902 // Memory allocation #2 after VmaAllocator_T definition
    5903 
    5904 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5905 {
    5906  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5907 }
    5908 
    5909 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5910 {
    5911  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5912 }
    5913 
    5914 template<typename T>
    5915 static T* VmaAllocate(VmaAllocator hAllocator)
    5916 {
    5917  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5918 }
    5919 
    5920 template<typename T>
    5921 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5922 {
    5923  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5924 }
    5925 
    5926 template<typename T>
    5927 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5928 {
    5929  if(ptr != VMA_NULL)
    5930  {
    5931  ptr->~T();
    5932  VmaFree(hAllocator, ptr);
    5933  }
    5934 }
    5935 
    5936 template<typename T>
    5937 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5938 {
    5939  if(ptr != VMA_NULL)
    5940  {
    5941  for(size_t i = count; i--; )
    5942  ptr[i].~T();
    5943  VmaFree(hAllocator, ptr);
    5944  }
    5945 }
    5946 
    5948 // VmaStringBuilder
    5949 
    5950 #if VMA_STATS_STRING_ENABLED
    5951 
// Minimal growable character buffer used to build stats/JSON strings.
// Note: the buffer is NOT null-terminated; use GetLength() with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5969 
    5970 void VmaStringBuilder::Add(const char* pStr)
    5971 {
    5972  const size_t strLen = strlen(pStr);
    5973  if(strLen > 0)
    5974  {
    5975  const size_t oldCount = m_Data.size();
    5976  m_Data.resize(oldCount + strLen);
    5977  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5978  }
    5979 }
    5980 
// Appends a decimal uint32_t. Buffer: 10 digits + null.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    char buf[11];
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends a decimal uint64_t. Buffer: 20 digits + null.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    char buf[21];
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}

// Appends a pointer formatted as text by VmaPtrToStr.
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
    6001 
    6002 #endif // #if VMA_STATS_STRING_ENABLED
    6003 
    6005 // VmaJsonWriter
    6006 
    6007 #if VMA_STATS_STRING_ENABLED
    6008 
// Streaming JSON writer over a VmaStringBuilder. Maintains a stack of open
// objects/arrays to emit separators and indentation, and asserts on misuse
// (e.g. non-string keys inside an object).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Open/close a JSON object or array. singleLine suppresses indentation.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Whole string value in one call, or built piecewise with Begin/Continue/End.
    void WriteString(const char* pStr);
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    // Scalar values.
    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;    // Values written so far; in objects, keys count too.
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;    // True between BeginString and EndString.

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6057 
    6058 const char* const VmaJsonWriter::INDENT = " ";
    6059 
// Writer starts outside any string or collection; output goes to sb.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

// On destruction every string and collection must have been closed.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6072 
    6073 void VmaJsonWriter::BeginObject(bool singleLine)
    6074 {
    6075  VMA_ASSERT(!m_InsideString);
    6076 
    6077  BeginValue(false);
    6078  m_SB.Add('{');
    6079 
    6080  StackItem item;
    6081  item.type = COLLECTION_TYPE_OBJECT;
    6082  item.valueCount = 0;
    6083  item.singleLineMode = singleLine;
    6084  m_Stack.push_back(item);
    6085 }
    6086 
    6087 void VmaJsonWriter::EndObject()
    6088 {
    6089  VMA_ASSERT(!m_InsideString);
    6090 
    6091  WriteIndent(true);
    6092  m_SB.Add('}');
    6093 
    6094  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6095  m_Stack.pop_back();
    6096 }
    6097 
    6098 void VmaJsonWriter::BeginArray(bool singleLine)
    6099 {
    6100  VMA_ASSERT(!m_InsideString);
    6101 
    6102  BeginValue(false);
    6103  m_SB.Add('[');
    6104 
    6105  StackItem item;
    6106  item.type = COLLECTION_TYPE_ARRAY;
    6107  item.valueCount = 0;
    6108  item.singleLineMode = singleLine;
    6109  m_Stack.push_back(item);
    6110 }
    6111 
    6112 void VmaJsonWriter::EndArray()
    6113 {
    6114  VMA_ASSERT(!m_InsideString);
    6115 
    6116  WriteIndent(true);
    6117  m_SB.Add(']');
    6118 
    6119  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6120  m_Stack.pop_back();
    6121 }
    6122 
// Writes a complete quoted, escaped string value in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6128 
    6129 void VmaJsonWriter::BeginString(const char* pStr)
    6130 {
    6131  VMA_ASSERT(!m_InsideString);
    6132 
    6133  BeginValue(true);
    6134  m_SB.Add('"');
    6135  m_InsideString = true;
    6136  if(pStr != VMA_NULL && pStr[0] != '\0')
    6137  {
    6138  ContinueString(pStr);
    6139  }
    6140 }
    6141 
    6142 void VmaJsonWriter::ContinueString(const char* pStr)
    6143 {
    6144  VMA_ASSERT(m_InsideString);
    6145 
    6146  const size_t strLen = strlen(pStr);
    6147  for(size_t i = 0; i < strLen; ++i)
    6148  {
    6149  char ch = pStr[i];
    6150  if(ch == '\\')
    6151  {
    6152  m_SB.Add("\\\\");
    6153  }
    6154  else if(ch == '"')
    6155  {
    6156  m_SB.Add("\\\"");
    6157  }
    6158  else if(ch >= 32)
    6159  {
    6160  m_SB.Add(ch);
    6161  }
    6162  else switch(ch)
    6163  {
    6164  case '\b':
    6165  m_SB.Add("\\b");
    6166  break;
    6167  case '\f':
    6168  m_SB.Add("\\f");
    6169  break;
    6170  case '\n':
    6171  m_SB.Add("\\n");
    6172  break;
    6173  case '\r':
    6174  m_SB.Add("\\r");
    6175  break;
    6176  case '\t':
    6177  m_SB.Add("\\t");
    6178  break;
    6179  default:
    6180  VMA_ASSERT(0 && "Character not currently supported.");
    6181  break;
    6182  }
    6183  }
    6184 }
    6185 
// Appends a decimal number to the currently open string (no escaping needed).
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a formatted pointer to the currently open string.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6203 
    6204 void VmaJsonWriter::EndString(const char* pStr)
    6205 {
    6206  VMA_ASSERT(m_InsideString);
    6207  if(pStr != VMA_NULL && pStr[0] != '\0')
    6208  {
    6209  ContinueString(pStr);
    6210  }
    6211  m_SB.Add('"');
    6212  m_InsideString = false;
    6213 }
    6214 
// Writes an unquoted numeric value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes the JSON literal true/false.
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

// Writes the JSON literal null.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6242 
    6243 void VmaJsonWriter::BeginValue(bool isString)
    6244 {
    6245  if(!m_Stack.empty())
    6246  {
    6247  StackItem& currItem = m_Stack.back();
    6248  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6249  currItem.valueCount % 2 == 0)
    6250  {
    6251  VMA_ASSERT(isString);
    6252  }
    6253 
    6254  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6255  currItem.valueCount % 2 != 0)
    6256  {
    6257  m_SB.Add(": ");
    6258  }
    6259  else if(currItem.valueCount > 0)
    6260  {
    6261  m_SB.Add(", ");
    6262  WriteIndent();
    6263  }
    6264  else
    6265  {
    6266  WriteIndent();
    6267  }
    6268  ++currItem.valueCount;
    6269  }
    6270 }
    6271 
    6272 void VmaJsonWriter::WriteIndent(bool oneLess)
    6273 {
    6274  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6275  {
    6276  m_SB.AddNewLine();
    6277 
    6278  size_t count = m_Stack.size();
    6279  if(count > 0 && oneLess)
    6280  {
    6281  --count;
    6282  }
    6283  for(size_t i = 0; i < count; ++i)
    6284  {
    6285  m_SB.Add(INDENT);
    6286  }
    6287  }
    6288 }
    6289 
    6290 #endif // #if VMA_STATS_STRING_ENABLED
    6291 
    6293 
    6294 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6295 {
    6296  if(IsUserDataString())
    6297  {
    6298  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6299 
    6300  FreeUserDataString(hAllocator);
    6301 
    6302  if(pUserData != VMA_NULL)
    6303  {
    6304  const char* const newStrSrc = (char*)pUserData;
    6305  const size_t newStrLen = strlen(newStrSrc);
    6306  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6307  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6308  m_pUserData = newStrDst;
    6309  }
    6310  }
    6311  else
    6312  {
    6313  m_pUserData = pUserData;
    6314  }
    6315 }
    6316 
// Rebinds this allocation to a different block/offset (used by defragmentation).
// If the allocation is currently mapped, its map reference count is transferred
// from the old block to the new one so the mapping stays balanced.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistent mapping holds one extra reference beyond the counter bits.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6338 
// Updates the cached size after a successful in-place resize. Size must be nonzero.
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    6344 
    6345 VkDeviceSize VmaAllocation_T::GetOffset() const
    6346 {
    6347  switch(m_Type)
    6348  {
    6349  case ALLOCATION_TYPE_BLOCK:
    6350  return m_BlockAllocation.m_Offset;
    6351  case ALLOCATION_TYPE_DEDICATED:
    6352  return 0;
    6353  default:
    6354  VMA_ASSERT(0);
    6355  return 0;
    6356  }
    6357 }
    6358 
    6359 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6360 {
    6361  switch(m_Type)
    6362  {
    6363  case ALLOCATION_TYPE_BLOCK:
    6364  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6365  case ALLOCATION_TYPE_DEDICATED:
    6366  return m_DedicatedAllocation.m_hMemory;
    6367  default:
    6368  VMA_ASSERT(0);
    6369  return VK_NULL_HANDLE;
    6370  }
    6371 }
    6372 
    6373 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6374 {
    6375  switch(m_Type)
    6376  {
    6377  case ALLOCATION_TYPE_BLOCK:
    6378  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6379  case ALLOCATION_TYPE_DEDICATED:
    6380  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6381  default:
    6382  VMA_ASSERT(0);
    6383  return UINT32_MAX;
    6384  }
    6385 }
    6386 
    6387 void* VmaAllocation_T::GetMappedData() const
    6388 {
    6389  switch(m_Type)
    6390  {
    6391  case ALLOCATION_TYPE_BLOCK:
    6392  if(m_MapCount != 0)
    6393  {
    6394  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6395  VMA_ASSERT(pBlockData != VMA_NULL);
    6396  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6397  }
    6398  else
    6399  {
    6400  return VMA_NULL;
    6401  }
    6402  break;
    6403  case ALLOCATION_TYPE_DEDICATED:
    6404  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6405  return m_DedicatedAllocation.m_pMappedData;
    6406  default:
    6407  VMA_ASSERT(0);
    6408  return VMA_NULL;
    6409  }
    6410 }
    6411 
    6412 bool VmaAllocation_T::CanBecomeLost() const
    6413 {
    6414  switch(m_Type)
    6415  {
    6416  case ALLOCATION_TYPE_BLOCK:
    6417  return m_BlockAllocation.m_CanBecomeLost;
    6418  case ALLOCATION_TYPE_DEDICATED:
    6419  return false;
    6420  default:
    6421  VMA_ASSERT(0);
    6422  return false;
    6423  }
    6424 }
    6425 
// Pool owning this allocation; valid only for block allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6431 
// Tries to atomically mark this allocation as lost. Returns true on success;
// returns false if the allocation was used too recently (within frameInUseCount
// frames of currentFrameIndex). Asserts if it is already lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // CAS loop: other threads may touch the allocation concurrently, which
    // re-reads localLastUseFrameIndex on each failed exchange.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    6463 
    6464 #if VMA_STATS_STRING_ENABLED
    6465 
    6466 // Correspond to values of enum VmaSuballocationType.
// Indexed by VmaSuballocationType; used by PrintParameters below.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6475 
// Emits this allocation's properties as key/value pairs into an already open
// JSON object. Optional fields (UserData, Usage) are written only when set.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // Owned string copy: print verbatim.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer: print its value.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6511 
    6512 #endif
    6513 
    6514 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6515 {
    6516  VMA_ASSERT(IsUserDataString());
    6517  if(m_pUserData != VMA_NULL)
    6518  {
    6519  char* const oldStr = (char*)m_pUserData;
    6520  const size_t oldStrLen = strlen(oldStr);
    6521  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6522  m_pUserData = VMA_NULL;
    6523  }
    6524 }
    6525 
    6526 void VmaAllocation_T::BlockAllocMap()
    6527 {
    6528  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6529 
    6530  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6531  {
    6532  ++m_MapCount;
    6533  }
    6534  else
    6535  {
    6536  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6537  }
    6538 }
    6539 
    6540 void VmaAllocation_T::BlockAllocUnmap()
    6541 {
    6542  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6543 
    6544  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6545  {
    6546  --m_MapCount;
    6547  }
    6548  else
    6549  {
    6550  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6551  }
    6552 }
    6553 
    6554 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6555 {
    6556  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6557 
    6558  if(m_MapCount != 0)
    6559  {
    6560  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6561  {
    6562  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6563  *ppData = m_DedicatedAllocation.m_pMappedData;
    6564  ++m_MapCount;
    6565  return VK_SUCCESS;
    6566  }
    6567  else
    6568  {
    6569  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6570  return VK_ERROR_MEMORY_MAP_FAILED;
    6571  }
    6572  }
    6573  else
    6574  {
    6575  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6576  hAllocator->m_hDevice,
    6577  m_DedicatedAllocation.m_hMemory,
    6578  0, // offset
    6579  VK_WHOLE_SIZE,
    6580  0, // flags
    6581  ppData);
    6582  if(result == VK_SUCCESS)
    6583  {
    6584  m_DedicatedAllocation.m_pMappedData = *ppData;
    6585  m_MapCount = 1;
    6586  }
    6587  return result;
    6588  }
    6589 }
    6590 
    6591 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6592 {
    6593  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6594 
    6595  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6596  {
    6597  --m_MapCount;
    6598  if(m_MapCount == 0)
    6599  {
    6600  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6601  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6602  hAllocator->m_hDevice,
    6603  m_DedicatedAllocation.m_hMemory);
    6604  }
    6605  }
    6606  else
    6607  {
    6608  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6609  }
    6610 }
    6611 
    6612 #if VMA_STATS_STRING_ENABLED
    6613 
    6614 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    6615 {
    6616  json.BeginObject();
    6617 
    6618  json.WriteString("Blocks");
    6619  json.WriteNumber(stat.blockCount);
    6620 
    6621  json.WriteString("Allocations");
    6622  json.WriteNumber(stat.allocationCount);
    6623 
    6624  json.WriteString("UnusedRanges");
    6625  json.WriteNumber(stat.unusedRangeCount);
    6626 
    6627  json.WriteString("UsedBytes");
    6628  json.WriteNumber(stat.usedBytes);
    6629 
    6630  json.WriteString("UnusedBytes");
    6631  json.WriteNumber(stat.unusedBytes);
    6632 
    6633  if(stat.allocationCount > 1)
    6634  {
    6635  json.WriteString("AllocationSize");
    6636  json.BeginObject(true);
    6637  json.WriteString("Min");
    6638  json.WriteNumber(stat.allocationSizeMin);
    6639  json.WriteString("Avg");
    6640  json.WriteNumber(stat.allocationSizeAvg);
    6641  json.WriteString("Max");
    6642  json.WriteNumber(stat.allocationSizeMax);
    6643  json.EndObject();
    6644  }
    6645 
    6646  if(stat.unusedRangeCount > 1)
    6647  {
    6648  json.WriteString("UnusedRangeSize");
    6649  json.BeginObject(true);
    6650  json.WriteString("Min");
    6651  json.WriteNumber(stat.unusedRangeSizeMin);
    6652  json.WriteString("Avg");
    6653  json.WriteNumber(stat.unusedRangeSizeAvg);
    6654  json.WriteString("Max");
    6655  json.WriteNumber(stat.unusedRangeSizeMax);
    6656  json.EndObject();
    6657  }
    6658 
    6659  json.EndObject();
    6660 }
    6661 
    6662 #endif // #if VMA_STATS_STRING_ENABLED
    6663 
// Comparator ordering free-suballocation iterators by ascending size.
// Used to keep m_FreeSuballocationsBySize sorted and to binary-search it.
struct VmaSuballocationItemSizeLess
{
    // Item-vs-item comparison (sorting).
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    // Heterogeneous item-vs-raw-size comparison (binary search, e.g.
    // VmaBinaryFindFirstNotLess in CreateAllocationRequest).
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6679 
    6680 
    6682 // class VmaBlockMetadata
    6683 
// Base-class constructor. The block size starts at 0 and is set later via
// Init(); only the allocation callbacks are captured here.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6689 
    6690 #if VMA_STATS_STRING_ENABLED
    6691 
    6692 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    6693  VkDeviceSize unusedBytes,
    6694  size_t allocationCount,
    6695  size_t unusedRangeCount) const
    6696 {
    6697  json.BeginObject();
    6698 
    6699  json.WriteString("TotalBytes");
    6700  json.WriteNumber(GetSize());
    6701 
    6702  json.WriteString("UnusedBytes");
    6703  json.WriteNumber(unusedBytes);
    6704 
    6705  json.WriteString("Allocations");
    6706  json.WriteNumber((uint64_t)allocationCount);
    6707 
    6708  json.WriteString("UnusedRanges");
    6709  json.WriteNumber((uint64_t)unusedRangeCount);
    6710 
    6711  json.WriteString("Suballocations");
    6712  json.BeginArray();
    6713 }
    6714 
    6715 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    6716  VkDeviceSize offset,
    6717  VmaAllocation hAllocation) const
    6718 {
    6719  json.BeginObject(true);
    6720 
    6721  json.WriteString("Offset");
    6722  json.WriteNumber(offset);
    6723 
    6724  hAllocation->PrintParameters(json);
    6725 
    6726  json.EndObject();
    6727 }
    6728 
    6729 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    6730  VkDeviceSize offset,
    6731  VkDeviceSize size) const
    6732 {
    6733  json.BeginObject(true);
    6734 
    6735  json.WriteString("Offset");
    6736  json.WriteNumber(offset);
    6737 
    6738  json.WriteString("Type");
    6739  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    6740 
    6741  json.WriteString("Size");
    6742  json.WriteNumber(size);
    6743 
    6744  json.EndObject();
    6745 }
    6746 
    6747 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    6748 {
    6749  json.EndArray();
    6750  json.EndObject();
    6751 }
    6752 
    6753 #endif // #if VMA_STATS_STRING_ENABLED
    6754 
    6756 // class VmaBlockMetadata_Generic
    6757 
// Generic (free-list based) metadata. Starts empty; the suballocation list
// and the by-size index of free ranges use the allocator's callbacks through
// VmaStlAllocator. Real initialization happens in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6766 
// Nothing to release explicitly; members clean up via their own destructors.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6770 
    6771 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6772 {
    6773  VmaBlockMetadata::Init(size);
    6774 
    6775  m_FreeCount = 1;
    6776  m_SumFreeSize = size;
    6777 
    6778  VmaSuballocation suballoc = {};
    6779  suballoc.offset = 0;
    6780  suballoc.size = size;
    6781  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6782  suballoc.hAllocation = VK_NULL_HANDLE;
    6783 
    6784  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6785  m_Suballocations.push_back(suballoc);
    6786  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6787  --suballocItem;
    6788  m_FreeSuballocationsBySize.push_back(suballocItem);
    6789 }
    6790 
// Full consistency check of this block's metadata. Walks the suballocation
// list, recomputing offsets, free count, free size and how many free items
// should be in the by-size index, then compares against the cached members.
// Returns true when everything is consistent. NOTE(review): each VMA_VALIDATE
// presumably returns false from this function on a failed condition — the
// macro is defined elsewhere; confirm before relying on the exact semantics.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A free item must carry no allocation handle, and a used item must carry one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with this list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // The by-size index must contain only free items, sorted by size ascending.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6872 
    6873 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6874 {
    6875  if(!m_FreeSuballocationsBySize.empty())
    6876  {
    6877  return m_FreeSuballocationsBySize.back()->size;
    6878  }
    6879  else
    6880  {
    6881  return 0;
    6882  }
    6883 }
    6884 
    6885 bool VmaBlockMetadata_Generic::IsEmpty() const
    6886 {
    6887  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6888 }
    6889 
    6890 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6891 {
    6892  outInfo.blockCount = 1;
    6893 
    6894  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6895  outInfo.allocationCount = rangeCount - m_FreeCount;
    6896  outInfo.unusedRangeCount = m_FreeCount;
    6897 
    6898  outInfo.unusedBytes = m_SumFreeSize;
    6899  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6900 
    6901  outInfo.allocationSizeMin = UINT64_MAX;
    6902  outInfo.allocationSizeMax = 0;
    6903  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6904  outInfo.unusedRangeSizeMax = 0;
    6905 
    6906  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6907  suballocItem != m_Suballocations.cend();
    6908  ++suballocItem)
    6909  {
    6910  const VmaSuballocation& suballoc = *suballocItem;
    6911  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6912  {
    6913  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6914  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6915  }
    6916  else
    6917  {
    6918  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6919  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6920  }
    6921  }
    6922 }
    6923 
    6924 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6925 {
    6926  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6927 
    6928  inoutStats.size += GetSize();
    6929  inoutStats.unusedSize += m_SumFreeSize;
    6930  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6931  inoutStats.unusedRangeCount += m_FreeCount;
    6932  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6933 }
    6934 
    6935 #if VMA_STATS_STRING_ENABLED
    6936 
    6937 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6938 {
    6939  PrintDetailedMap_Begin(json,
    6940  m_SumFreeSize, // unusedBytes
    6941  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6942  m_FreeCount); // unusedRangeCount
    6943 
    6944  size_t i = 0;
    6945  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6946  suballocItem != m_Suballocations.cend();
    6947  ++suballocItem, ++i)
    6948  {
    6949  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6950  {
    6951  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6952  }
    6953  else
    6954  {
    6955  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6956  }
    6957  }
    6958 
    6959  PrintDetailedMap_End(json);
    6960 }
    6961 
    6962 #endif // #if VMA_STATS_STRING_ENABLED
    6963 
    6964 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6965  uint32_t currentFrameIndex,
    6966  uint32_t frameInUseCount,
    6967  VkDeviceSize bufferImageGranularity,
    6968  VkDeviceSize allocSize,
    6969  VkDeviceSize allocAlignment,
    6970  bool upperAddress,
    6971  VmaSuballocationType allocType,
    6972  bool canMakeOtherLost,
    6973  uint32_t strategy,
    6974  VmaAllocationRequest* pAllocationRequest)
    6975 {
    6976  VMA_ASSERT(allocSize > 0);
    6977  VMA_ASSERT(!upperAddress);
    6978  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6979  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6980  VMA_HEAVY_ASSERT(Validate());
    6981 
    6982  // There is not enough total free space in this block to fullfill the request: Early return.
    6983  if(canMakeOtherLost == false &&
    6984  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6985  {
    6986  return false;
    6987  }
    6988 
    6989  // New algorithm, efficiently searching freeSuballocationsBySize.
    6990  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6991  if(freeSuballocCount > 0)
    6992  {
    6994  {
    6995  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6996  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6997  m_FreeSuballocationsBySize.data(),
    6998  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    6999  allocSize + 2 * VMA_DEBUG_MARGIN,
    7000  VmaSuballocationItemSizeLess());
    7001  size_t index = it - m_FreeSuballocationsBySize.data();
    7002  for(; index < freeSuballocCount; ++index)
    7003  {
    7004  if(CheckAllocation(
    7005  currentFrameIndex,
    7006  frameInUseCount,
    7007  bufferImageGranularity,
    7008  allocSize,
    7009  allocAlignment,
    7010  allocType,
    7011  m_FreeSuballocationsBySize[index],
    7012  false, // canMakeOtherLost
    7013  &pAllocationRequest->offset,
    7014  &pAllocationRequest->itemsToMakeLostCount,
    7015  &pAllocationRequest->sumFreeSize,
    7016  &pAllocationRequest->sumItemSize))
    7017  {
    7018  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7019  return true;
    7020  }
    7021  }
    7022  }
    7023  else // WORST_FIT, FIRST_FIT
    7024  {
    7025  // Search staring from biggest suballocations.
    7026  for(size_t index = freeSuballocCount; index--; )
    7027  {
    7028  if(CheckAllocation(
    7029  currentFrameIndex,
    7030  frameInUseCount,
    7031  bufferImageGranularity,
    7032  allocSize,
    7033  allocAlignment,
    7034  allocType,
    7035  m_FreeSuballocationsBySize[index],
    7036  false, // canMakeOtherLost
    7037  &pAllocationRequest->offset,
    7038  &pAllocationRequest->itemsToMakeLostCount,
    7039  &pAllocationRequest->sumFreeSize,
    7040  &pAllocationRequest->sumItemSize))
    7041  {
    7042  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7043  return true;
    7044  }
    7045  }
    7046  }
    7047  }
    7048 
    7049  if(canMakeOtherLost)
    7050  {
    7051  // Brute-force algorithm. TODO: Come up with something better.
    7052 
    7053  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7054  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7055 
    7056  VmaAllocationRequest tmpAllocRequest = {};
    7057  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7058  suballocIt != m_Suballocations.end();
    7059  ++suballocIt)
    7060  {
    7061  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7062  suballocIt->hAllocation->CanBecomeLost())
    7063  {
    7064  if(CheckAllocation(
    7065  currentFrameIndex,
    7066  frameInUseCount,
    7067  bufferImageGranularity,
    7068  allocSize,
    7069  allocAlignment,
    7070  allocType,
    7071  suballocIt,
    7072  canMakeOtherLost,
    7073  &tmpAllocRequest.offset,
    7074  &tmpAllocRequest.itemsToMakeLostCount,
    7075  &tmpAllocRequest.sumFreeSize,
    7076  &tmpAllocRequest.sumItemSize))
    7077  {
    7078  tmpAllocRequest.item = suballocIt;
    7079 
    7080  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7082  {
    7083  *pAllocationRequest = tmpAllocRequest;
    7084  }
    7085  }
    7086  }
    7087  }
    7088 
    7089  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7090  {
    7091  return true;
    7092  }
    7093  }
    7094 
    7095  return false;
    7096 }
    7097 
// Makes the allocations selected by a prior CreateAllocationRequest() lost so
// the request can actually be satisfied. Returns false if any of them refuses
// to become lost; the request is then unusable. On success, request->item
// points at a free suballocation covering the requested place.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free item to reach the next allocation to make lost.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // Freeing may merge adjacent free ranges; continue from the
            // iterator FreeSuballocation() returns.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7129 
    7130 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7131 {
    7132  uint32_t lostAllocationCount = 0;
    7133  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7134  it != m_Suballocations.end();
    7135  ++it)
    7136  {
    7137  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7138  it->hAllocation->CanBecomeLost() &&
    7139  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7140  {
    7141  it = FreeSuballocation(it);
    7142  ++lostAllocationCount;
    7143  }
    7144  }
    7145  return lostAllocationCount;
    7146 }
    7147 
    7148 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7149 {
    7150  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7151  it != m_Suballocations.end();
    7152  ++it)
    7153  {
    7154  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7155  {
    7156  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7157  {
    7158  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7159  return VK_ERROR_VALIDATION_FAILED_EXT;
    7160  }
    7161  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7162  {
    7163  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7164  return VK_ERROR_VALIDATION_FAILED_EXT;
    7165  }
    7166  }
    7167  }
    7168 
    7169  return VK_SUCCESS;
    7170 }
    7171 
// Commits a previously computed allocation request: converts (part of) the
// free suballocation pointed to by request.item into a used suballocation of
// allocSize bytes at request.offset, inserting new free suballocations for
// any leftover space before and after it, and updates the cached totals.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the consumed free item goes away, and each inserted
    // padding item adds one free range back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7237 
    7238 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7239 {
    7240  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7241  suballocItem != m_Suballocations.end();
    7242  ++suballocItem)
    7243  {
    7244  VmaSuballocation& suballoc = *suballocItem;
    7245  if(suballoc.hAllocation == allocation)
    7246  {
    7247  FreeSuballocation(suballocItem);
    7248  VMA_HEAVY_ASSERT(Validate());
    7249  return;
    7250  }
    7251  }
    7252  VMA_ASSERT(0 && "Not found!");
    7253 }
    7254 
    7255 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7256 {
    7257  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7258  suballocItem != m_Suballocations.end();
    7259  ++suballocItem)
    7260  {
    7261  VmaSuballocation& suballoc = *suballocItem;
    7262  if(suballoc.offset == offset)
    7263  {
    7264  FreeSuballocation(suballocItem);
    7265  return;
    7266  }
    7267  }
    7268  VMA_ASSERT(0 && "Not found!");
    7269 }
    7270 
// Tries to change the size of the given allocation in place, without moving
// it. Shrinking always succeeds (leftover space becomes/extends a free range
// after the item). Growing succeeds only if the immediately following
// suballocation is free and large enough, including VMA_DEBUG_MARGIN.
// Returns false if the allocation was not found or cannot grow in place.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister/re-register because its size changes and
                        // the by-size index must stay sorted.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7397 
    7398 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7399 {
    7400  VkDeviceSize lastSize = 0;
    7401  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7402  {
    7403  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7404 
    7405  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7406  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7407  VMA_VALIDATE(it->size >= lastSize);
    7408  lastSize = it->size;
    7409  }
    7410  return true;
    7411 }
    7412 
    7413 bool VmaBlockMetadata_Generic::CheckAllocation(
    7414  uint32_t currentFrameIndex,
    7415  uint32_t frameInUseCount,
    7416  VkDeviceSize bufferImageGranularity,
    7417  VkDeviceSize allocSize,
    7418  VkDeviceSize allocAlignment,
    7419  VmaSuballocationType allocType,
    7420  VmaSuballocationList::const_iterator suballocItem,
    7421  bool canMakeOtherLost,
    7422  VkDeviceSize* pOffset,
    7423  size_t* itemsToMakeLostCount,
    7424  VkDeviceSize* pSumFreeSize,
    7425  VkDeviceSize* pSumItemSize) const
    7426 {
    7427  VMA_ASSERT(allocSize > 0);
    7428  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7429  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    7430  VMA_ASSERT(pOffset != VMA_NULL);
    7431 
    7432  *itemsToMakeLostCount = 0;
    7433  *pSumFreeSize = 0;
    7434  *pSumItemSize = 0;
    7435 
    7436  if(canMakeOtherLost)
    7437  {
    7438  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7439  {
    7440  *pSumFreeSize = suballocItem->size;
    7441  }
    7442  else
    7443  {
    7444  if(suballocItem->hAllocation->CanBecomeLost() &&
    7445  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    7446  {
    7447  ++*itemsToMakeLostCount;
    7448  *pSumItemSize = suballocItem->size;
    7449  }
    7450  else
    7451  {
    7452  return false;
    7453  }
    7454  }
    7455 
    7456  // Remaining size is too small for this request: Early return.
    7457  if(GetSize() - suballocItem->offset < allocSize)
    7458  {
    7459  return false;
    7460  }
    7461 
    7462  // Start from offset equal to beginning of this suballocation.
    7463  *pOffset = suballocItem->offset;
    7464 
    7465  // Apply VMA_DEBUG_MARGIN at the beginning.
    7466  if(VMA_DEBUG_MARGIN > 0)
    7467  {
    7468  *pOffset += VMA_DEBUG_MARGIN;
    7469  }
    7470 
    7471  // Apply alignment.
    7472  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    7473 
    7474  // Check previous suballocations for BufferImageGranularity conflicts.
    7475  // Make bigger alignment if necessary.
    7476  if(bufferImageGranularity > 1)
    7477  {
    7478  bool bufferImageGranularityConflict = false;
    7479  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    7480  while(prevSuballocItem != m_Suballocations.cbegin())
    7481  {
    7482  --prevSuballocItem;
    7483  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    7484  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    7485  {
    7486  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    7487  {
    7488  bufferImageGranularityConflict = true;
    7489  break;
    7490  }
    7491  }
    7492  else
    7493  // Already on previous page.
    7494  break;
    7495  }
    7496  if(bufferImageGranularityConflict)
    7497  {
    7498  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    7499  }
    7500  }
    7501 
    7502  // Now that we have final *pOffset, check if we are past suballocItem.
    7503  // If yes, return false - this function should be called for another suballocItem as starting point.
    7504  if(*pOffset >= suballocItem->offset + suballocItem->size)
    7505  {
    7506  return false;
    7507  }
    7508 
    7509  // Calculate padding at the beginning based on current offset.
    7510  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    7511 
    7512  // Calculate required margin at the end.
    7513  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    7514 
    7515  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    7516  // Another early return check.
    7517  if(suballocItem->offset + totalSize > GetSize())
    7518  {
    7519  return false;
    7520  }
    7521 
    7522  // Advance lastSuballocItem until desired size is reached.
    7523  // Update itemsToMakeLostCount.
    7524  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    7525  if(totalSize > suballocItem->size)
    7526  {
    7527  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    7528  while(remainingSize > 0)
    7529  {
    7530  ++lastSuballocItem;
    7531  if(lastSuballocItem == m_Suballocations.cend())
    7532  {
    7533  return false;
    7534  }
    7535  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7536  {
    7537  *pSumFreeSize += lastSuballocItem->size;
    7538  }
    7539  else
    7540  {
    7541  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    7542  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    7543  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    7544  {
    7545  ++*itemsToMakeLostCount;
    7546  *pSumItemSize += lastSuballocItem->size;
    7547  }
    7548  else
    7549  {
    7550  return false;
    7551  }
    7552  }
    7553  remainingSize = (lastSuballocItem->size < remainingSize) ?
    7554  remainingSize - lastSuballocItem->size : 0;
    7555  }
    7556  }
    7557 
    7558  // Check next suballocations for BufferImageGranularity conflicts.
    7559  // If conflict exists, we must mark more allocations lost or fail.
    7560  if(bufferImageGranularity > 1)
    7561  {
    7562  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    7563  ++nextSuballocItem;
    7564  while(nextSuballocItem != m_Suballocations.cend())
    7565  {
    7566  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    7567  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    7568  {
    7569  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    7570  {
    7571  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    7572  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    7573  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    7574  {
    7575  ++*itemsToMakeLostCount;
    7576  }
    7577  else
    7578  {
    7579  return false;
    7580  }
    7581  }
    7582  }
    7583  else
    7584  {
    7585  // Already on next page.
    7586  break;
    7587  }
    7588  ++nextSuballocItem;
    7589  }
    7590  }
    7591  }
    7592  else
    7593  {
    7594  const VmaSuballocation& suballoc = *suballocItem;
    7595  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7596 
    7597  *pSumFreeSize = suballoc.size;
    7598 
    7599  // Size of this suballocation is too small for this request: Early return.
    7600  if(suballoc.size < allocSize)
    7601  {
    7602  return false;
    7603  }
    7604 
    7605  // Start from offset equal to beginning of this suballocation.
    7606  *pOffset = suballoc.offset;
    7607 
    7608  // Apply VMA_DEBUG_MARGIN at the beginning.
    7609  if(VMA_DEBUG_MARGIN > 0)
    7610  {
    7611  *pOffset += VMA_DEBUG_MARGIN;
    7612  }
    7613 
    7614  // Apply alignment.
    7615  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    7616 
    7617  // Check previous suballocations for BufferImageGranularity conflicts.
    7618  // Make bigger alignment if necessary.
    7619  if(bufferImageGranularity > 1)
    7620  {
    7621  bool bufferImageGranularityConflict = false;
    7622  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    7623  while(prevSuballocItem != m_Suballocations.cbegin())
    7624  {
    7625  --prevSuballocItem;
    7626  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    7627  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    7628  {
    7629  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    7630  {
    7631  bufferImageGranularityConflict = true;
    7632  break;
    7633  }
    7634  }
    7635  else
    7636  // Already on previous page.
    7637  break;
    7638  }
    7639  if(bufferImageGranularityConflict)
    7640  {
    7641  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    7642  }
    7643  }
    7644 
    7645  // Calculate padding at the beginning based on current offset.
    7646  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    7647 
    7648  // Calculate required margin at the end.
    7649  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    7650 
    7651  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    7652  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    7653  {
    7654  return false;
    7655  }
    7656 
    7657  // Check next suballocations for BufferImageGranularity conflicts.
    7658  // If conflict exists, allocation cannot be made here.
    7659  if(bufferImageGranularity > 1)
    7660  {
    7661  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    7662  ++nextSuballocItem;
    7663  while(nextSuballocItem != m_Suballocations.cend())
    7664  {
    7665  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    7666  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    7667  {
    7668  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    7669  {
    7670  return false;
    7671  }
    7672  }
    7673  else
    7674  {
    7675  // Already on next page.
    7676  break;
    7677  }
    7678  ++nextSuballocItem;
    7679  }
    7680  }
    7681  }
    7682 
    7683  // All tests passed: Success. pOffset is already filled.
    7684  return true;
    7685 }
    7686 
    7687 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7688 {
    7689  VMA_ASSERT(item != m_Suballocations.end());
    7690  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7691 
    7692  VmaSuballocationList::iterator nextItem = item;
    7693  ++nextItem;
    7694  VMA_ASSERT(nextItem != m_Suballocations.end());
    7695  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7696 
    7697  item->size += nextItem->size;
    7698  --m_FreeCount;
    7699  m_Suballocations.erase(nextItem);
    7700 }
    7701 
    7702 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    7703 {
    7704  // Change this suballocation to be marked as free.
    7705  VmaSuballocation& suballoc = *suballocItem;
    7706  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7707  suballoc.hAllocation = VK_NULL_HANDLE;
    7708 
    7709  // Update totals.
    7710  ++m_FreeCount;
    7711  m_SumFreeSize += suballoc.size;
    7712 
    7713  // Merge with previous and/or next suballocation if it's also free.
    7714  bool mergeWithNext = false;
    7715  bool mergeWithPrev = false;
    7716 
    7717  VmaSuballocationList::iterator nextItem = suballocItem;
    7718  ++nextItem;
    7719  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    7720  {
    7721  mergeWithNext = true;
    7722  }
    7723 
    7724  VmaSuballocationList::iterator prevItem = suballocItem;
    7725  if(suballocItem != m_Suballocations.begin())
    7726  {
    7727  --prevItem;
    7728  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7729  {
    7730  mergeWithPrev = true;
    7731  }
    7732  }
    7733 
    7734  if(mergeWithNext)
    7735  {
    7736  UnregisterFreeSuballocation(nextItem);
    7737  MergeFreeWithNext(suballocItem);
    7738  }
    7739 
    7740  if(mergeWithPrev)
    7741  {
    7742  UnregisterFreeSuballocation(prevItem);
    7743  MergeFreeWithNext(prevItem);
    7744  RegisterFreeSuballocation(prevItem);
    7745  return prevItem;
    7746  }
    7747  else
    7748  {
    7749  RegisterFreeSuballocation(suballocItem);
    7750  return suballocItem;
    7751  }
    7752 }
    7753 
    7754 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7755 {
    7756  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7757  VMA_ASSERT(item->size > 0);
    7758 
    7759  // You may want to enable this validation at the beginning or at the end of
    7760  // this function, depending on what do you want to check.
    7761  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7762 
    7763  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7764  {
    7765  if(m_FreeSuballocationsBySize.empty())
    7766  {
    7767  m_FreeSuballocationsBySize.push_back(item);
    7768  }
    7769  else
    7770  {
    7771  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7772  }
    7773  }
    7774 
    7775  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7776 }
    7777 
    7778 
    7779 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    7780 {
    7781  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7782  VMA_ASSERT(item->size > 0);
    7783 
    7784  // You may want to enable this validation at the beginning or at the end of
    7785  // this function, depending on what do you want to check.
    7786  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7787 
    7788  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7789  {
    7790  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7791  m_FreeSuballocationsBySize.data(),
    7792  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    7793  item,
    7794  VmaSuballocationItemSizeLess());
    7795  for(size_t index = it - m_FreeSuballocationsBySize.data();
    7796  index < m_FreeSuballocationsBySize.size();
    7797  ++index)
    7798  {
    7799  if(m_FreeSuballocationsBySize[index] == item)
    7800  {
    7801  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    7802  return;
    7803  }
    7804  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    7805  }
    7806  VMA_ASSERT(0 && "Not found.");
    7807  }
    7808 
    7809  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7810 }
    7811 
    7813 // class VmaBlockMetadata_Linear
    7814 
/*
Constructs linear block metadata in its initial, empty state.

Both suballocation vectors (m_Suballocations0/1) receive the allocator's
custom allocation callbacks. m_1stVectorIndex selects which vector currently
acts as "1st"; the 2nd vector starts unused (SECOND_VECTOR_EMPTY). All
null-item counters start at zero. m_SumFreeSize remains 0 until Init() is
called with the actual block size.
*/
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7827 
// Destructor: no explicit cleanup needed; member vectors release their
// storage through their own destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7831 
// Initializes metadata for a block of the given size: the base class stores
// the size, and the whole block starts out as free space.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7837 
/*
Checks internal consistency of this linear metadata: agreement between the
2nd vector and its mode, null-item accounting, monotonically increasing
offsets (with VMA_DEBUG_MARGIN between items), agreement of each live
suballocation with its VmaAllocation handle, and that m_SumFreeSize matches
the space not occupied by live allocations.

Returns true when everything is consistent. VMA_VALIDATE returns false from
this function as soon as a condition is violated, so the order of checks
matters.
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is empty exactly when the mode says so, and a ring buffer
    // cannot have items in 2nd while 1st is empty.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lower part of the block,
    // so walk it first, keeping `offset` monotonically increasing across both
    // vectors.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // A suballocation is free iff it carries no allocation handle.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // Live allocation must agree with its handle's cached offset/size.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Items before m_1stNullItemsBeginCount must all be freed placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i starts at m_1stNullItemsBeginCount, so this condition
        // is always true -- possibly intended as a different check; confirm.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            // Live allocation must agree with its handle's cached offset/size.
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        // Double stack: 2nd vector stores items top-down (highest offset at
        // index 0), so iterate it backwards to keep `offset` increasing.
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // Live allocation must agree with its handle's cached offset/size.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    // Free-space bookkeeping must match the actual unused space.
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7964 
    7965 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7966 {
    7967  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7968  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7969 }
    7970 
    7971 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7972 {
    7973  const VkDeviceSize size = GetSize();
    7974 
    7975  /*
    7976  We don't consider gaps inside allocation vectors with freed allocations because
    7977  they are not suitable for reuse in linear allocator. We consider only space that
    7978  is available for new allocations.
    7979  */
    7980  if(IsEmpty())
    7981  {
    7982  return size;
    7983  }
    7984 
    7985  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7986 
    7987  switch(m_2ndVectorMode)
    7988  {
    7989  case SECOND_VECTOR_EMPTY:
    7990  /*
    7991  Available space is after end of 1st, as well as before beginning of 1st (which
    7992  whould make it a ring buffer).
    7993  */
    7994  {
    7995  const size_t suballocations1stCount = suballocations1st.size();
    7996  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    7997  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    7998  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    7999  return VMA_MAX(
    8000  firstSuballoc.offset,
    8001  size - (lastSuballoc.offset + lastSuballoc.size));
    8002  }
    8003  break;
    8004 
    8005  case SECOND_VECTOR_RING_BUFFER:
    8006  /*
    8007  Available space is only between end of 2nd and beginning of 1st.
    8008  */
    8009  {
    8010  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8011  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8012  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8013  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8014  }
    8015  break;
    8016 
    8017  case SECOND_VECTOR_DOUBLE_STACK:
    8018  /*
    8019  Available space is only between end of 1st and top of 2nd.
    8020  */
    8021  {
    8022  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8023  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8024  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8025  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8026  }
    8027  break;
    8028 
    8029  default:
    8030  VMA_ASSERT(0);
    8031  return 0;
    8032  }
    8033 }
    8034 
    8035 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8036 {
    8037  const VkDeviceSize size = GetSize();
    8038  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8039  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8040  const size_t suballoc1stCount = suballocations1st.size();
    8041  const size_t suballoc2ndCount = suballocations2nd.size();
    8042 
    8043  outInfo.blockCount = 1;
    8044  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8045  outInfo.unusedRangeCount = 0;
    8046  outInfo.usedBytes = 0;
    8047  outInfo.allocationSizeMin = UINT64_MAX;
    8048  outInfo.allocationSizeMax = 0;
    8049  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8050  outInfo.unusedRangeSizeMax = 0;
    8051 
    8052  VkDeviceSize lastOffset = 0;
    8053 
    8054  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8055  {
    8056  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8057  size_t nextAlloc2ndIndex = 0;
    8058  while(lastOffset < freeSpace2ndTo1stEnd)
    8059  {
    8060  // Find next non-null allocation or move nextAllocIndex to the end.
    8061  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8062  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8063  {
    8064  ++nextAlloc2ndIndex;
    8065  }
    8066 
    8067  // Found non-null allocation.
    8068  if(nextAlloc2ndIndex < suballoc2ndCount)
    8069  {
    8070  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8071 
    8072  // 1. Process free space before this allocation.
    8073  if(lastOffset < suballoc.offset)
    8074  {
    8075  // There is free space from lastOffset to suballoc.offset.
    8076  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8077  ++outInfo.unusedRangeCount;
    8078  outInfo.unusedBytes += unusedRangeSize;
    8079  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8080  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8081  }
    8082 
    8083  // 2. Process this allocation.
    8084  // There is allocation with suballoc.offset, suballoc.size.
    8085  outInfo.usedBytes += suballoc.size;
    8086  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8087  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8088 
    8089  // 3. Prepare for next iteration.
    8090  lastOffset = suballoc.offset + suballoc.size;
    8091  ++nextAlloc2ndIndex;
    8092  }
    8093  // We are at the end.
    8094  else
    8095  {
    8096  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8097  if(lastOffset < freeSpace2ndTo1stEnd)
    8098  {
    8099  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8100  ++outInfo.unusedRangeCount;
    8101  outInfo.unusedBytes += unusedRangeSize;
    8102  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8103  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8104  }
    8105 
    8106  // End of loop.
    8107  lastOffset = freeSpace2ndTo1stEnd;
    8108  }
    8109  }
    8110  }
    8111 
    8112  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8113  const VkDeviceSize freeSpace1stTo2ndEnd =
    8114  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8115  while(lastOffset < freeSpace1stTo2ndEnd)
    8116  {
    8117  // Find next non-null allocation or move nextAllocIndex to the end.
    8118  while(nextAlloc1stIndex < suballoc1stCount &&
    8119  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8120  {
    8121  ++nextAlloc1stIndex;
    8122  }
    8123 
    8124  // Found non-null allocation.
    8125  if(nextAlloc1stIndex < suballoc1stCount)
    8126  {
    8127  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8128 
    8129  // 1. Process free space before this allocation.
    8130  if(lastOffset < suballoc.offset)
    8131  {
    8132  // There is free space from lastOffset to suballoc.offset.
    8133  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8134  ++outInfo.unusedRangeCount;
    8135  outInfo.unusedBytes += unusedRangeSize;
    8136  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8137  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8138  }
    8139 
    8140  // 2. Process this allocation.
    8141  // There is allocation with suballoc.offset, suballoc.size.
    8142  outInfo.usedBytes += suballoc.size;
    8143  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8144  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8145 
    8146  // 3. Prepare for next iteration.
    8147  lastOffset = suballoc.offset + suballoc.size;
    8148  ++nextAlloc1stIndex;
    8149  }
    8150  // We are at the end.
    8151  else
    8152  {
    8153  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8154  if(lastOffset < freeSpace1stTo2ndEnd)
    8155  {
    8156  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8157  ++outInfo.unusedRangeCount;
    8158  outInfo.unusedBytes += unusedRangeSize;
    8159  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8160  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8161  }
    8162 
    8163  // End of loop.
    8164  lastOffset = freeSpace1stTo2ndEnd;
    8165  }
    8166  }
    8167 
    8168  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8169  {
    8170  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8171  while(lastOffset < size)
    8172  {
    8173  // Find next non-null allocation or move nextAllocIndex to the end.
    8174  while(nextAlloc2ndIndex != SIZE_MAX &&
    8175  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8176  {
    8177  --nextAlloc2ndIndex;
    8178  }
    8179 
    8180  // Found non-null allocation.
    8181  if(nextAlloc2ndIndex != SIZE_MAX)
    8182  {
    8183  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8184 
    8185  // 1. Process free space before this allocation.
    8186  if(lastOffset < suballoc.offset)
    8187  {
    8188  // There is free space from lastOffset to suballoc.offset.
    8189  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8190  ++outInfo.unusedRangeCount;
    8191  outInfo.unusedBytes += unusedRangeSize;
    8192  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8193  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8194  }
    8195 
    8196  // 2. Process this allocation.
    8197  // There is allocation with suballoc.offset, suballoc.size.
    8198  outInfo.usedBytes += suballoc.size;
    8199  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8200  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8201 
    8202  // 3. Prepare for next iteration.
    8203  lastOffset = suballoc.offset + suballoc.size;
    8204  --nextAlloc2ndIndex;
    8205  }
    8206  // We are at the end.
    8207  else
    8208  {
    8209  // There is free space from lastOffset to size.
    8210  if(lastOffset < size)
    8211  {
    8212  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8213  ++outInfo.unusedRangeCount;
    8214  outInfo.unusedBytes += unusedRangeSize;
    8215  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8216  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8217  }
    8218 
    8219  // End of loop.
    8220  lastOffset = size;
    8221  }
    8222  }
    8223  }
    8224 
    8225  outInfo.unusedBytes = size - outInfo.usedBytes;
    8226 }
    8227 
    8228 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8229 {
    8230  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8231  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8232  const VkDeviceSize size = GetSize();
    8233  const size_t suballoc1stCount = suballocations1st.size();
    8234  const size_t suballoc2ndCount = suballocations2nd.size();
    8235 
    8236  inoutStats.size += size;
    8237 
    8238  VkDeviceSize lastOffset = 0;
    8239 
    8240  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8241  {
    8242  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8243  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8244  while(lastOffset < freeSpace2ndTo1stEnd)
    8245  {
    8246  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8247  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8248  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8249  {
    8250  ++nextAlloc2ndIndex;
    8251  }
    8252 
    8253  // Found non-null allocation.
    8254  if(nextAlloc2ndIndex < suballoc2ndCount)
    8255  {
    8256  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8257 
    8258  // 1. Process free space before this allocation.
    8259  if(lastOffset < suballoc.offset)
    8260  {
    8261  // There is free space from lastOffset to suballoc.offset.
    8262  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8263  inoutStats.unusedSize += unusedRangeSize;
    8264  ++inoutStats.unusedRangeCount;
    8265  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8266  }
    8267 
    8268  // 2. Process this allocation.
    8269  // There is allocation with suballoc.offset, suballoc.size.
    8270  ++inoutStats.allocationCount;
    8271 
    8272  // 3. Prepare for next iteration.
    8273  lastOffset = suballoc.offset + suballoc.size;
    8274  ++nextAlloc2ndIndex;
    8275  }
    8276  // We are at the end.
    8277  else
    8278  {
    8279  if(lastOffset < freeSpace2ndTo1stEnd)
    8280  {
    8281  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8282  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8283  inoutStats.unusedSize += unusedRangeSize;
    8284  ++inoutStats.unusedRangeCount;
    8285  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8286  }
    8287 
    8288  // End of loop.
    8289  lastOffset = freeSpace2ndTo1stEnd;
    8290  }
    8291  }
    8292  }
    8293 
    8294  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8295  const VkDeviceSize freeSpace1stTo2ndEnd =
    8296  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8297  while(lastOffset < freeSpace1stTo2ndEnd)
    8298  {
    8299  // Find next non-null allocation or move nextAllocIndex to the end.
    8300  while(nextAlloc1stIndex < suballoc1stCount &&
    8301  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8302  {
    8303  ++nextAlloc1stIndex;
    8304  }
    8305 
    8306  // Found non-null allocation.
    8307  if(nextAlloc1stIndex < suballoc1stCount)
    8308  {
    8309  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8310 
    8311  // 1. Process free space before this allocation.
    8312  if(lastOffset < suballoc.offset)
    8313  {
    8314  // There is free space from lastOffset to suballoc.offset.
    8315  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8316  inoutStats.unusedSize += unusedRangeSize;
    8317  ++inoutStats.unusedRangeCount;
    8318  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8319  }
    8320 
    8321  // 2. Process this allocation.
    8322  // There is allocation with suballoc.offset, suballoc.size.
    8323  ++inoutStats.allocationCount;
    8324 
    8325  // 3. Prepare for next iteration.
    8326  lastOffset = suballoc.offset + suballoc.size;
    8327  ++nextAlloc1stIndex;
    8328  }
    8329  // We are at the end.
    8330  else
    8331  {
    8332  if(lastOffset < freeSpace1stTo2ndEnd)
    8333  {
    8334  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8335  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8336  inoutStats.unusedSize += unusedRangeSize;
    8337  ++inoutStats.unusedRangeCount;
    8338  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8339  }
    8340 
    8341  // End of loop.
    8342  lastOffset = freeSpace1stTo2ndEnd;
    8343  }
    8344  }
    8345 
    8346  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8347  {
    8348  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8349  while(lastOffset < size)
    8350  {
    8351  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8352  while(nextAlloc2ndIndex != SIZE_MAX &&
    8353  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8354  {
    8355  --nextAlloc2ndIndex;
    8356  }
    8357 
    8358  // Found non-null allocation.
    8359  if(nextAlloc2ndIndex != SIZE_MAX)
    8360  {
    8361  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8362 
    8363  // 1. Process free space before this allocation.
    8364  if(lastOffset < suballoc.offset)
    8365  {
    8366  // There is free space from lastOffset to suballoc.offset.
    8367  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8368  inoutStats.unusedSize += unusedRangeSize;
    8369  ++inoutStats.unusedRangeCount;
    8370  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8371  }
    8372 
    8373  // 2. Process this allocation.
    8374  // There is allocation with suballoc.offset, suballoc.size.
    8375  ++inoutStats.allocationCount;
    8376 
    8377  // 3. Prepare for next iteration.
    8378  lastOffset = suballoc.offset + suballoc.size;
    8379  --nextAlloc2ndIndex;
    8380  }
    8381  // We are at the end.
    8382  else
    8383  {
    8384  if(lastOffset < size)
    8385  {
    8386  // There is free space from lastOffset to size.
    8387  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8388  inoutStats.unusedSize += unusedRangeSize;
    8389  ++inoutStats.unusedRangeCount;
    8390  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8391  }
    8392 
    8393  // End of loop.
    8394  lastOffset = size;
    8395  }
    8396  }
    8397  }
    8398 }
    8399 
    8400 #if VMA_STATS_STRING_ENABLED
    8401 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8402 {
    8403  const VkDeviceSize size = GetSize();
    8404  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8405  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8406  const size_t suballoc1stCount = suballocations1st.size();
    8407  const size_t suballoc2ndCount = suballocations2nd.size();
    8408 
    8409  // FIRST PASS
    8410 
    8411  size_t unusedRangeCount = 0;
    8412  VkDeviceSize usedBytes = 0;
    8413 
    8414  VkDeviceSize lastOffset = 0;
    8415 
    8416  size_t alloc2ndCount = 0;
    8417  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8418  {
    8419  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8420  size_t nextAlloc2ndIndex = 0;
    8421  while(lastOffset < freeSpace2ndTo1stEnd)
    8422  {
    8423  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8424  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8425  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8426  {
    8427  ++nextAlloc2ndIndex;
    8428  }
    8429 
    8430  // Found non-null allocation.
    8431  if(nextAlloc2ndIndex < suballoc2ndCount)
    8432  {
    8433  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8434 
    8435  // 1. Process free space before this allocation.
    8436  if(lastOffset < suballoc.offset)
    8437  {
    8438  // There is free space from lastOffset to suballoc.offset.
    8439  ++unusedRangeCount;
    8440  }
    8441 
    8442  // 2. Process this allocation.
    8443  // There is allocation with suballoc.offset, suballoc.size.
    8444  ++alloc2ndCount;
    8445  usedBytes += suballoc.size;
    8446 
    8447  // 3. Prepare for next iteration.
    8448  lastOffset = suballoc.offset + suballoc.size;
    8449  ++nextAlloc2ndIndex;
    8450  }
    8451  // We are at the end.
    8452  else
    8453  {
    8454  if(lastOffset < freeSpace2ndTo1stEnd)
    8455  {
    8456  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8457  ++unusedRangeCount;
    8458  }
    8459 
    8460  // End of loop.
    8461  lastOffset = freeSpace2ndTo1stEnd;
    8462  }
    8463  }
    8464  }
    8465 
    8466  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8467  size_t alloc1stCount = 0;
    8468  const VkDeviceSize freeSpace1stTo2ndEnd =
    8469  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8470  while(lastOffset < freeSpace1stTo2ndEnd)
    8471  {
    8472  // Find next non-null allocation or move nextAllocIndex to the end.
    8473  while(nextAlloc1stIndex < suballoc1stCount &&
    8474  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8475  {
    8476  ++nextAlloc1stIndex;
    8477  }
    8478 
    8479  // Found non-null allocation.
    8480  if(nextAlloc1stIndex < suballoc1stCount)
    8481  {
    8482  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8483 
    8484  // 1. Process free space before this allocation.
    8485  if(lastOffset < suballoc.offset)
    8486  {
    8487  // There is free space from lastOffset to suballoc.offset.
    8488  ++unusedRangeCount;
    8489  }
    8490 
    8491  // 2. Process this allocation.
    8492  // There is allocation with suballoc.offset, suballoc.size.
    8493  ++alloc1stCount;
    8494  usedBytes += suballoc.size;
    8495 
    8496  // 3. Prepare for next iteration.
    8497  lastOffset = suballoc.offset + suballoc.size;
    8498  ++nextAlloc1stIndex;
    8499  }
    8500  // We are at the end.
    8501  else
    8502  {
    8503  if(lastOffset < size)
    8504  {
    8505  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8506  ++unusedRangeCount;
    8507  }
    8508 
    8509  // End of loop.
    8510  lastOffset = freeSpace1stTo2ndEnd;
    8511  }
    8512  }
    8513 
    8514  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8515  {
    8516  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8517  while(lastOffset < size)
    8518  {
    8519  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8520  while(nextAlloc2ndIndex != SIZE_MAX &&
    8521  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8522  {
    8523  --nextAlloc2ndIndex;
    8524  }
    8525 
    8526  // Found non-null allocation.
    8527  if(nextAlloc2ndIndex != SIZE_MAX)
    8528  {
    8529  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8530 
    8531  // 1. Process free space before this allocation.
    8532  if(lastOffset < suballoc.offset)
    8533  {
    8534  // There is free space from lastOffset to suballoc.offset.
    8535  ++unusedRangeCount;
    8536  }
    8537 
    8538  // 2. Process this allocation.
    8539  // There is allocation with suballoc.offset, suballoc.size.
    8540  ++alloc2ndCount;
    8541  usedBytes += suballoc.size;
    8542 
    8543  // 3. Prepare for next iteration.
    8544  lastOffset = suballoc.offset + suballoc.size;
    8545  --nextAlloc2ndIndex;
    8546  }
    8547  // We are at the end.
    8548  else
    8549  {
    8550  if(lastOffset < size)
    8551  {
    8552  // There is free space from lastOffset to size.
    8553  ++unusedRangeCount;
    8554  }
    8555 
    8556  // End of loop.
    8557  lastOffset = size;
    8558  }
    8559  }
    8560  }
    8561 
    8562  const VkDeviceSize unusedBytes = size - usedBytes;
    8563  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8564 
    8565  // SECOND PASS
    8566  lastOffset = 0;
    8567 
    8568  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8569  {
    8570  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8571  size_t nextAlloc2ndIndex = 0;
    8572  while(lastOffset < freeSpace2ndTo1stEnd)
    8573  {
    8574  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8575  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8576  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8577  {
    8578  ++nextAlloc2ndIndex;
    8579  }
    8580 
    8581  // Found non-null allocation.
    8582  if(nextAlloc2ndIndex < suballoc2ndCount)
    8583  {
    8584  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8585 
    8586  // 1. Process free space before this allocation.
    8587  if(lastOffset < suballoc.offset)
    8588  {
    8589  // There is free space from lastOffset to suballoc.offset.
    8590  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8591  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8592  }
    8593 
    8594  // 2. Process this allocation.
    8595  // There is allocation with suballoc.offset, suballoc.size.
    8596  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8597 
    8598  // 3. Prepare for next iteration.
    8599  lastOffset = suballoc.offset + suballoc.size;
    8600  ++nextAlloc2ndIndex;
    8601  }
    8602  // We are at the end.
    8603  else
    8604  {
    8605  if(lastOffset < freeSpace2ndTo1stEnd)
    8606  {
    8607  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8608  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8609  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8610  }
    8611 
    8612  // End of loop.
    8613  lastOffset = freeSpace2ndTo1stEnd;
    8614  }
    8615  }
    8616  }
    8617 
    8618  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8619  while(lastOffset < freeSpace1stTo2ndEnd)
    8620  {
    8621  // Find next non-null allocation or move nextAllocIndex to the end.
    8622  while(nextAlloc1stIndex < suballoc1stCount &&
    8623  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8624  {
    8625  ++nextAlloc1stIndex;
    8626  }
    8627 
    8628  // Found non-null allocation.
    8629  if(nextAlloc1stIndex < suballoc1stCount)
    8630  {
    8631  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8632 
    8633  // 1. Process free space before this allocation.
    8634  if(lastOffset < suballoc.offset)
    8635  {
    8636  // There is free space from lastOffset to suballoc.offset.
    8637  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8638  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8639  }
    8640 
    8641  // 2. Process this allocation.
    8642  // There is allocation with suballoc.offset, suballoc.size.
    8643  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8644 
    8645  // 3. Prepare for next iteration.
    8646  lastOffset = suballoc.offset + suballoc.size;
    8647  ++nextAlloc1stIndex;
    8648  }
    8649  // We are at the end.
    8650  else
    8651  {
    8652  if(lastOffset < freeSpace1stTo2ndEnd)
    8653  {
    8654  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8655  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8656  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8657  }
    8658 
    8659  // End of loop.
    8660  lastOffset = freeSpace1stTo2ndEnd;
    8661  }
    8662  }
    8663 
    8664  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8665  {
    8666  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8667  while(lastOffset < size)
    8668  {
    8669  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8670  while(nextAlloc2ndIndex != SIZE_MAX &&
    8671  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8672  {
    8673  --nextAlloc2ndIndex;
    8674  }
    8675 
    8676  // Found non-null allocation.
    8677  if(nextAlloc2ndIndex != SIZE_MAX)
    8678  {
    8679  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8680 
    8681  // 1. Process free space before this allocation.
    8682  if(lastOffset < suballoc.offset)
    8683  {
    8684  // There is free space from lastOffset to suballoc.offset.
    8685  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8686  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8687  }
    8688 
    8689  // 2. Process this allocation.
    8690  // There is allocation with suballoc.offset, suballoc.size.
    8691  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8692 
    8693  // 3. Prepare for next iteration.
    8694  lastOffset = suballoc.offset + suballoc.size;
    8695  --nextAlloc2ndIndex;
    8696  }
    8697  // We are at the end.
    8698  else
    8699  {
    8700  if(lastOffset < size)
    8701  {
    8702  // There is free space from lastOffset to size.
    8703  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8704  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8705  }
    8706 
    8707  // End of loop.
    8708  lastOffset = size;
    8709  }
    8710  }
    8711  }
    8712 
    8713  PrintDetailedMap_End(json);
    8714 }
    8715 #endif // #if VMA_STATS_STRING_ENABLED
    8716 
// Tries to find a place for a new allocation of allocSize/allocAlignment inside
// this linear block, without modifying any state. On success fills
// *pAllocationRequest (offset, free/item sums, number of allocations that would
// have to be made lost) and returns true; returns false if the request cannot
// be satisfied.
//
// Placement strategy depends on mode:
// - upperAddress: allocate downward from the top (2nd vector as double stack).
// - otherwise: try the end of the 1st vector first; if that fails and the 2nd
//   vector is empty or a ring buffer, wrap around to the bottom of the block
//   (2nd vector), optionally making old allocations in 1st lost
//   (canMakeOtherLost) to create room.
// bufferImageGranularity conflicts between linear and non-linear resources are
// resolved by bumping alignment or rejecting the spot.
// NOTE(review): `strategy` is accepted but not used here — the linear
// algorithm has only one placement policy.
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Upper-address allocations use the 2nd vector as a stack growing
        // downward; this is incompatible with ring-buffer usage of 2nd.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            // Reject if the new allocation would not fit below the current top
            // of the down-growing stack.
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Push the allocation down to its own granularity page.
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // Free space ends where the 2nd stack begins (double stack) or at
            // the end of the block.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Walk allocations at the beginning of 1st that collide with the
                // proposed range and check whether each one can be made lost.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    9089 
    9090 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    9091  uint32_t currentFrameIndex,
    9092  uint32_t frameInUseCount,
    9093  VmaAllocationRequest* pAllocationRequest)
    9094 {
    9095  if(pAllocationRequest->itemsToMakeLostCount == 0)
    9096  {
    9097  return true;
    9098  }
    9099 
    9100  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    9101 
    9102  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9103  size_t index1st = m_1stNullItemsBeginCount;
    9104  size_t madeLostCount = 0;
    9105  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    9106  {
    9107  VMA_ASSERT(index1st < suballocations1st.size());
    9108  VmaSuballocation& suballoc = suballocations1st[index1st];
    9109  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9110  {
    9111  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9112  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    9113  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9114  {
    9115  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9116  suballoc.hAllocation = VK_NULL_HANDLE;
    9117  m_SumFreeSize += suballoc.size;
    9118  ++m_1stNullItemsMiddleCount;
    9119  ++madeLostCount;
    9120  }
    9121  else
    9122  {
    9123  return false;
    9124  }
    9125  }
    9126  ++index1st;
    9127  }
    9128 
    9129  CleanupAfterFree();
    9130  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    9131 
    9132  return true;
    9133 }
    9134 
    9135 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9136 {
    9137  uint32_t lostAllocationCount = 0;
    9138 
    9139  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9140  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9141  {
    9142  VmaSuballocation& suballoc = suballocations1st[i];
    9143  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9144  suballoc.hAllocation->CanBecomeLost() &&
    9145  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9146  {
    9147  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9148  suballoc.hAllocation = VK_NULL_HANDLE;
    9149  ++m_1stNullItemsMiddleCount;
    9150  m_SumFreeSize += suballoc.size;
    9151  ++lostAllocationCount;
    9152  }
    9153  }
    9154 
    9155  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9156  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9157  {
    9158  VmaSuballocation& suballoc = suballocations2nd[i];
    9159  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9160  suballoc.hAllocation->CanBecomeLost() &&
    9161  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9162  {
    9163  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9164  suballoc.hAllocation = VK_NULL_HANDLE;
    9165  ++m_2ndNullItemsCount;
    9166  ++lostAllocationCount;
    9167  }
    9168  }
    9169 
    9170  if(lostAllocationCount)
    9171  {
    9172  CleanupAfterFree();
    9173  }
    9174 
    9175  return lostAllocationCount;
    9176 }
    9177 
    9178 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9179 {
    9180  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9181  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9182  {
    9183  const VmaSuballocation& suballoc = suballocations1st[i];
    9184  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9185  {
    9186  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9187  {
    9188  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9189  return VK_ERROR_VALIDATION_FAILED_EXT;
    9190  }
    9191  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9192  {
    9193  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9194  return VK_ERROR_VALIDATION_FAILED_EXT;
    9195  }
    9196  }
    9197  }
    9198 
    9199  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9200  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9201  {
    9202  const VmaSuballocation& suballoc = suballocations2nd[i];
    9203  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9204  {
    9205  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9206  {
    9207  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9208  return VK_ERROR_VALIDATION_FAILED_EXT;
    9209  }
    9210  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9211  {
    9212  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9213  return VK_ERROR_VALIDATION_FAILED_EXT;
    9214  }
    9215  }
    9216  }
    9217 
    9218  return VK_SUCCESS;
    9219 }
    9220 
/*
Commits an allocation previously prepared by CreateAllocationRequest().
- upperAddress == true: pushes onto the 2nd vector used as the upper part of a
  double stack (grows downward from the end of the block).
- otherwise the offset decides: append to the end of the 1st vector, or wrap
  around and append to the 2nd vector working as a 2-part ring buffer.
*/
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Double-stack mode is mutually exclusive with ring-buffer mode.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // Ring-buffer mode is entered lazily on first wrap-around.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset matches neither end - cannot happen for a request
                // produced by CreateAllocationRequest().
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9290 
    9291 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9292 {
    9293  FreeAtOffset(allocation->GetOffset());
    9294 }
    9295 
/*
Frees the suballocation that starts at `offset`. Checks the cheap cases first
(very beginning of the 1st vector, the last item of either vector), then falls
back to binary search in the middle of each vector. Always finishes with
CleanupAfterFree() to restore the metadata invariants.
*/
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Binary search - the used part of the 1st vector is sorted by ascending offset.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as free in place; CleanupAfterFree() compacts the hole later.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps the 2nd vector sorted ascending by offset; the
        // double stack keeps it sorted descending - hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9384 
    9385 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9386 {
    9387  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9388  const size_t suballocCount = AccessSuballocations1st().size();
    9389  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9390 }
    9391 
/*
Restores the metadata invariants after one or more suballocations were marked
free: trims trailing null items, grows the leading null-item run, optionally
compacts the 1st vector, and when the 1st vector drains while the ring buffer
is active, promotes the 2nd vector to become the new 1st.
*/
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Nothing allocated any more - reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        // Too many null items scattered through the 1st vector: compact by
        // sliding the remaining used items to the front, preserving order.
        if(ShouldCompact1st())
        {
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                // Skip over null items to the next used one.
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // The ring buffer wrapped fully around: the former 2nd vector
                // becomes the new 1st (done by flipping m_1stVectorIndex), and
                // its leading null items are re-counted for the new role.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9488 
    9489 
    9491 // class VmaBlockMetadata_Buddy
    9492 
    9493 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    9494  VmaBlockMetadata(hAllocator),
    9495  m_Root(VMA_NULL),
    9496  m_AllocationCount(0),
    9497  m_FreeCount(1),
    9498  m_SumFreeSize(0)
    9499 {
    9500  memset(m_FreeList, 0, sizeof(m_FreeList));
    9501 }
    9502 
    9503 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    9504 {
    9505  DeleteNode(m_Root);
    9506 }
    9507 
/*
Initializes the buddy metadata for a block of the given size.
The buddy system can only manage a power-of-2 region, so the tail
(size - m_UsableSize) is permanently unusable and reported separately.
*/
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount.
    // Every level halves the node size; stop before nodes would drop below
    // MIN_NODE_SIZE or the MAX_LEVELS limit is exceeded.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // The entire usable region starts as one free root node at level 0.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    9532 
/*
Checks internal consistency: the node tree, the cached counters, and the
per-level free lists. Returns false (via VMA_VALIDATE) on the first violation.
*/
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Totals accumulated over the tree must match the cached members.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            // Links must be consistent in both directions; the last node must
            // match the cached back pointer.
            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9575 
    9576 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9577 {
    9578  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9579  {
    9580  if(m_FreeList[level].front != VMA_NULL)
    9581  {
    9582  return LevelToNodeSize(level);
    9583  }
    9584  }
    9585  return 0;
    9586 }
    9587 
    9588 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9589 {
    9590  const VkDeviceSize unusableSize = GetUnusableSize();
    9591 
    9592  outInfo.blockCount = 1;
    9593 
    9594  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9595  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9596 
    9597  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9598  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9599  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9600 
    9601  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9602 
    9603  if(unusableSize > 0)
    9604  {
    9605  ++outInfo.unusedRangeCount;
    9606  outInfo.unusedBytes += unusableSize;
    9607  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9608  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9609  }
    9610 }
    9611 
    9612 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9613 {
    9614  const VkDeviceSize unusableSize = GetUnusableSize();
    9615 
    9616  inoutStats.size += GetSize();
    9617  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9618  inoutStats.allocationCount += m_AllocationCount;
    9619  inoutStats.unusedRangeCount += m_FreeCount;
    9620  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9621 
    9622  if(unusableSize > 0)
    9623  {
    9624  ++inoutStats.unusedRangeCount;
    9625  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9626  }
    9627 }
    9628 
    9629 #if VMA_STATS_STRING_ENABLED
    9630 
/*
Serializes the block's full allocation map to JSON.
Statistics are gathered first because PrintDetailedMap_Begin() needs the
totals up front.
*/
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    // Emit every allocation / free range by walking the tree from the root.
    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    // Report the non-power-of-2 tail of the block as one unused range.
    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9655 
    9656 #endif // #if VMA_STATS_STRING_ENABLED
    9657 
/*
Searches for a free node that can hold the requested allocation.
On success fills *pAllocationRequest (storing the found node's level in
customData for Alloc() to split down) and returns true.
Lost allocations and upper-address allocations are not supported here.
*/
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Scan free lists from targetLevel (smallest fitting nodes) up toward
    // level 0 (largest nodes); note `level--` iterates targetLevel, ..., 0.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Remember the level of the chosen node for Alloc().
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9708 
    9709 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9710  uint32_t currentFrameIndex,
    9711  uint32_t frameInUseCount,
    9712  VmaAllocationRequest* pAllocationRequest)
    9713 {
    9714  /*
    9715  Lost allocations are not supported in buddy allocator at the moment.
    9716  Support might be added in the future.
    9717  */
    9718  return pAllocationRequest->itemsToMakeLostCount == 0;
    9719 }
    9720 
    9721 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9722 {
    9723  /*
    9724  Lost allocations are not supported in buddy allocator at the moment.
    9725  Support might be added in the future.
    9726  */
    9727  return 0;
    9728 }
    9729 
/*
Commits an allocation prepared by CreateAllocationRequest(): finds the chosen
free node (its level was stored in request.customData), splits it repeatedly
until a node of exactly the target level remains, and converts that node into
an allocation node.
*/
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level of the free node picked by CreateAllocationRequest().
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node with the requested offset in this level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Right is pushed first so that left ends up at the front and is
        // picked up below as the continuation node.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // One node split into two: net gain of one free node.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9804 
    9805 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9806 {
    9807  if(node->type == Node::TYPE_SPLIT)
    9808  {
    9809  DeleteNode(node->split.leftChild->buddy);
    9810  DeleteNode(node->split.leftChild);
    9811  }
    9812 
    9813  vma_delete(GetAllocationCallbacks(), node);
    9814 }
    9815 
    9816 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9817 {
    9818  VMA_VALIDATE(level < m_LevelCount);
    9819  VMA_VALIDATE(curr->parent == parent);
    9820  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9821  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9822  switch(curr->type)
    9823  {
    9824  case Node::TYPE_FREE:
    9825  // curr->free.prev, next are validated separately.
    9826  ctx.calculatedSumFreeSize += levelNodeSize;
    9827  ++ctx.calculatedFreeCount;
    9828  break;
    9829  case Node::TYPE_ALLOCATION:
    9830  ++ctx.calculatedAllocationCount;
    9831  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9832  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9833  break;
    9834  case Node::TYPE_SPLIT:
    9835  {
    9836  const uint32_t childrenLevel = level + 1;
    9837  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9838  const Node* const leftChild = curr->split.leftChild;
    9839  VMA_VALIDATE(leftChild != VMA_NULL);
    9840  VMA_VALIDATE(leftChild->offset == curr->offset);
    9841  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9842  {
    9843  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9844  }
    9845  const Node* const rightChild = leftChild->buddy;
    9846  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9847  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9848  {
    9849  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9850  }
    9851  }
    9852  break;
    9853  default:
    9854  return false;
    9855  }
    9856 
    9857  return true;
    9858 }
    9859 
    9860 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9861 {
    9862  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9863  uint32_t level = 0;
    9864  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9865  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9866  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9867  {
    9868  ++level;
    9869  currLevelNodeSize = nextLevelNodeSize;
    9870  nextLevelNodeSize = currLevelNodeSize >> 1;
    9871  }
    9872  return level;
    9873 }
    9874 
/*
Frees the allocation node at `offset`: descends the tree to find it, marks it
free, then merges it with its buddy upwards as long as both halves are free.
`alloc` is used for accounting and for an identity sanity check.
*/
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    // At every split node the offset decides left vs. right child.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above tolerates alloc == VK_NULL_HANDLE, yet this
    // line dereferences alloc unconditionally - confirm callers never pass null.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    // Walk upwards merging the node with its buddy while both halves of the
    // parent are free; the parent then becomes one larger free node.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9925 
    9926 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9927 {
    9928  switch(node->type)
    9929  {
    9930  case Node::TYPE_FREE:
    9931  ++outInfo.unusedRangeCount;
    9932  outInfo.unusedBytes += levelNodeSize;
    9933  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9934  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9935  break;
    9936  case Node::TYPE_ALLOCATION:
    9937  {
    9938  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9939  ++outInfo.allocationCount;
    9940  outInfo.usedBytes += allocSize;
    9941  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9942  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9943 
    9944  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9945  if(unusedRangeSize > 0)
    9946  {
    9947  ++outInfo.unusedRangeCount;
    9948  outInfo.unusedBytes += unusedRangeSize;
    9949  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9950  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9951  }
    9952  }
    9953  break;
    9954  case Node::TYPE_SPLIT:
    9955  {
    9956  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9957  const Node* const leftChild = node->split.leftChild;
    9958  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9959  const Node* const rightChild = leftChild->buddy;
    9960  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9961  }
    9962  break;
    9963  default:
    9964  VMA_ASSERT(0);
    9965  }
    9966 }
    9967 
    9968 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9969 {
    9970  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9971 
    9972  // List is empty.
    9973  Node* const frontNode = m_FreeList[level].front;
    9974  if(frontNode == VMA_NULL)
    9975  {
    9976  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9977  node->free.prev = node->free.next = VMA_NULL;
    9978  m_FreeList[level].front = m_FreeList[level].back = node;
    9979  }
    9980  else
    9981  {
    9982  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9983  node->free.prev = VMA_NULL;
    9984  node->free.next = frontNode;
    9985  frontNode->free.prev = node;
    9986  m_FreeList[level].front = node;
    9987  }
    9988 }
    9989 
    9990 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9991 {
    9992  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9993 
    9994  // It is at the front.
    9995  if(node->free.prev == VMA_NULL)
    9996  {
    9997  VMA_ASSERT(m_FreeList[level].front == node);
    9998  m_FreeList[level].front = node->free.next;
    9999  }
    10000  else
    10001  {
    10002  Node* const prevFreeNode = node->free.prev;
    10003  VMA_ASSERT(prevFreeNode->free.next == node);
    10004  prevFreeNode->free.next = node->free.next;
    10005  }
    10006 
    10007  // It is at the back.
    10008  if(node->free.next == VMA_NULL)
    10009  {
    10010  VMA_ASSERT(m_FreeList[level].back == node);
    10011  m_FreeList[level].back = node->free.prev;
    10012  }
    10013  else
    10014  {
    10015  Node* const nextFreeNode = node->free.next;
    10016  VMA_ASSERT(nextFreeNode->free.prev == node);
    10017  nextFreeNode->free.prev = node->free.prev;
    10018  }
    10019 }
    10020 
    10021 #if VMA_STATS_STRING_ENABLED
// Recursively serializes the subtree rooted at `node` to JSON: free ranges,
// allocations, and the wasted tail of nodes larger than their allocation.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            // The node may be larger than its allocation; report the unused
            // tail as a separate range.
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            // Recurse left child first to keep emitted offsets ascending.
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    10052 #endif // #if VMA_STATS_STRING_ENABLED
    10053 
    10054 
    10056 // class VmaDeviceMemoryBlock
    10057 
// Constructs an empty block; real initialization happens in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX), // invalid until Init()
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),       // reference count of outstanding Map() calls
    m_pMappedData(VMA_NULL)
{
}
    10067 
    10068 void VmaDeviceMemoryBlock::Init(
    10069  VmaAllocator hAllocator,
    10070  uint32_t newMemoryTypeIndex,
    10071  VkDeviceMemory newMemory,
    10072  VkDeviceSize newSize,
    10073  uint32_t id,
    10074  uint32_t algorithm)
    10075 {
    10076  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10077 
    10078  m_MemoryTypeIndex = newMemoryTypeIndex;
    10079  m_Id = id;
    10080  m_hMemory = newMemory;
    10081 
    10082  switch(algorithm)
    10083  {
    10085  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10086  break;
    10088  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10089  break;
    10090  default:
    10091  VMA_ASSERT(0);
    10092  // Fall-through.
    10093  case 0:
    10094  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10095  }
    10096  m_pMetadata->Init(newSize);
    10097 }
    10098 
    10099 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    10100 {
    10101  // This is the most important assert in the entire library.
    10102  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    10103  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    10104 
    10105  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    10106  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    10107  m_hMemory = VK_NULL_HANDLE;
    10108 
    10109  vma_delete(allocator, m_pMetadata);
    10110  m_pMetadata = VMA_NULL;
    10111 }
    10112 
    10113 bool VmaDeviceMemoryBlock::Validate() const
    10114 {
    10115  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10116  (m_pMetadata->GetSize() != 0));
    10117 
    10118  return m_pMetadata->Validate();
    10119 }
    10120 
    10121 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10122 {
    10123  void* pData = nullptr;
    10124  VkResult res = Map(hAllocator, 1, &pData);
    10125  if(res != VK_SUCCESS)
    10126  {
    10127  return res;
    10128  }
    10129 
    10130  res = m_pMetadata->CheckCorruption(pData);
    10131 
    10132  Unmap(hAllocator, 1);
    10133 
    10134  return res;
    10135 }
    10136 
    10137 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    10138 {
    10139  if(count == 0)
    10140  {
    10141  return VK_SUCCESS;
    10142  }
    10143 
    10144  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10145  if(m_MapCount != 0)
    10146  {
    10147  m_MapCount += count;
    10148  VMA_ASSERT(m_pMappedData != VMA_NULL);
    10149  if(ppData != VMA_NULL)
    10150  {
    10151  *ppData = m_pMappedData;
    10152  }
    10153  return VK_SUCCESS;
    10154  }
    10155  else
    10156  {
    10157  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    10158  hAllocator->m_hDevice,
    10159  m_hMemory,
    10160  0, // offset
    10161  VK_WHOLE_SIZE,
    10162  0, // flags
    10163  &m_pMappedData);
    10164  if(result == VK_SUCCESS)
    10165  {
    10166  if(ppData != VMA_NULL)
    10167  {
    10168  *ppData = m_pMappedData;
    10169  }
    10170  m_MapCount = count;
    10171  }
    10172  return result;
    10173  }
    10174 }
    10175 
    10176 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10177 {
    10178  if(count == 0)
    10179  {
    10180  return;
    10181  }
    10182 
    10183  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10184  if(m_MapCount >= count)
    10185  {
    10186  m_MapCount -= count;
    10187  if(m_MapCount == 0)
    10188  {
    10189  m_pMappedData = VMA_NULL;
    10190  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10191  }
    10192  }
    10193  else
    10194  {
    10195  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10196  }
    10197 }
    10198 
    10199 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10200 {
    10201  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10202  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10203 
    10204  void* pData;
    10205  VkResult res = Map(hAllocator, 1, &pData);
    10206  if(res != VK_SUCCESS)
    10207  {
    10208  return res;
    10209  }
    10210 
    10211  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10212  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10213 
    10214  Unmap(hAllocator, 1);
    10215 
    10216  return VK_SUCCESS;
    10217 }
    10218 
    10219 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10220 {
    10221  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10222  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10223 
    10224  void* pData;
    10225  VkResult res = Map(hAllocator, 1, &pData);
    10226  if(res != VK_SUCCESS)
    10227  {
    10228  return res;
    10229  }
    10230 
    10231  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10232  {
    10233  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10234  }
    10235  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10236  {
    10237  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10238  }
    10239 
    10240  Unmap(hAllocator, 1);
    10241 
    10242  return VK_SUCCESS;
    10243 }
    10244 
    10245 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10246  const VmaAllocator hAllocator,
    10247  const VmaAllocation hAllocation,
    10248  VkBuffer hBuffer)
    10249 {
    10250  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10251  hAllocation->GetBlock() == this);
    10252  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10253  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10254  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10255  hAllocator->m_hDevice,
    10256  hBuffer,
    10257  m_hMemory,
    10258  hAllocation->GetOffset());
    10259 }
    10260 
    10261 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10262  const VmaAllocator hAllocator,
    10263  const VmaAllocation hAllocation,
    10264  VkImage hImage)
    10265 {
    10266  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10267  hAllocation->GetBlock() == this);
    10268  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10269  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10270  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10271  hAllocator->m_hDevice,
    10272  hImage,
    10273  m_hMemory,
    10274  hAllocation->GetOffset());
    10275 }
    10276 
// Resets outInfo to a neutral accumulation state: all counters/sums zero,
// and minimum fields at UINT64_MAX so the first VMA_MIN in VmaAddStatInfo
// takes effect.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
    10283 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    // Counters and byte totals accumulate directly.
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    // Min/max fields merge by taking the tighter bound from either side.
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
    10297 
    10298 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10299 {
    10300  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10301  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10302  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10303  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10304 }
    10305 
// A custom pool is a thin wrapper over one VmaBlockVector configured from
// the user's VmaPoolCreateInfo.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // blockSize == 0 means the caller left sizing to the allocator.
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10324 
// Nothing to do here: m_BlockVector's destructor destroys all memory blocks.
VmaPool_T::~VmaPool_T()
{
}
    10328 
    10329 #if VMA_STATS_STRING_ENABLED
    10330 
    10331 #endif // #if VMA_STATS_STRING_ENABLED
    10332 
// Stores the configuration for a vector of VkDeviceMemory blocks of one
// memory type. No blocks are created here; see CreateMinBlocks()/Allocate().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false), // tracks whether exactly one fully-free block is kept around
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10360 
    10361 VmaBlockVector::~VmaBlockVector()
    10362 {
    10363  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10364 
    10365  for(size_t i = m_Blocks.size(); i--; )
    10366  {
    10367  m_Blocks[i]->Destroy(m_hAllocator);
    10368  vma_delete(m_hAllocator, m_Blocks[i]);
    10369  }
    10370 }
    10371 
    10372 VkResult VmaBlockVector::CreateMinBlocks()
    10373 {
    10374  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10375  {
    10376  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10377  if(res != VK_SUCCESS)
    10378  {
    10379  return res;
    10380  }
    10381  }
    10382  return VK_SUCCESS;
    10383 }
    10384 
    10385 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10386 {
    10387  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10388 
    10389  const size_t blockCount = m_Blocks.size();
    10390 
    10391  pStats->size = 0;
    10392  pStats->unusedSize = 0;
    10393  pStats->allocationCount = 0;
    10394  pStats->unusedRangeCount = 0;
    10395  pStats->unusedRangeSizeMax = 0;
    10396  pStats->blockCount = blockCount;
    10397 
    10398  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10399  {
    10400  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10401  VMA_ASSERT(pBlock);
    10402  VMA_HEAVY_ASSERT(pBlock->Validate());
    10403  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10404  }
    10405 }
    10406 
    10407 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10408 {
    10409  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10410  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10411  (VMA_DEBUG_MARGIN > 0) &&
    10412  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10413 }
    10414 
// Maximum number of retries in VmaBlockVector::Allocate when allocating with
// canMakeOtherLost and other threads keep touching the candidate allocations.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10416 
    10417 VkResult VmaBlockVector::Allocate(
    10418  VmaPool hCurrentPool,
    10419  uint32_t currentFrameIndex,
    10420  VkDeviceSize size,
    10421  VkDeviceSize alignment,
    10422  const VmaAllocationCreateInfo& createInfo,
    10423  VmaSuballocationType suballocType,
    10424  VmaAllocation* pAllocation)
    10425 {
    10426  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10427  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10428  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10429  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10430  const bool canCreateNewBlock =
    10431  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10432  (m_Blocks.size() < m_MaxBlockCount);
    10433  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10434 
    10435  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10436  // Which in turn is available only when maxBlockCount = 1.
    10437  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10438  {
    10439  canMakeOtherLost = false;
    10440  }
    10441 
    10442  // Upper address can only be used with linear allocator and within single memory block.
    10443  if(isUpperAddress &&
    10444  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10445  {
    10446  return VK_ERROR_FEATURE_NOT_PRESENT;
    10447  }
    10448 
    10449  // Validate strategy.
    10450  switch(strategy)
    10451  {
    10452  case 0:
    10454  break;
    10458  break;
    10459  default:
    10460  return VK_ERROR_FEATURE_NOT_PRESENT;
    10461  }
    10462 
    10463  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10464  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10465  {
    10466  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10467  }
    10468 
    10469  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10470 
    10471  /*
    10472  Under certain condition, this whole section can be skipped for optimization, so
    10473  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10474  e.g. for custom pools with linear algorithm.
    10475  */
    10476  if(!canMakeOtherLost || canCreateNewBlock)
    10477  {
    10478  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10479  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10481 
    10482  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10483  {
    10484  // Use only last block.
    10485  if(!m_Blocks.empty())
    10486  {
    10487  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10488  VMA_ASSERT(pCurrBlock);
    10489  VkResult res = AllocateFromBlock(
    10490  pCurrBlock,
    10491  hCurrentPool,
    10492  currentFrameIndex,
    10493  size,
    10494  alignment,
    10495  allocFlagsCopy,
    10496  createInfo.pUserData,
    10497  suballocType,
    10498  strategy,
    10499  pAllocation);
    10500  if(res == VK_SUCCESS)
    10501  {
    10502  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10503  return VK_SUCCESS;
    10504  }
    10505  }
    10506  }
    10507  else
    10508  {
    10510  {
    10511  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10512  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10513  {
    10514  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10515  VMA_ASSERT(pCurrBlock);
    10516  VkResult res = AllocateFromBlock(
    10517  pCurrBlock,
    10518  hCurrentPool,
    10519  currentFrameIndex,
    10520  size,
    10521  alignment,
    10522  allocFlagsCopy,
    10523  createInfo.pUserData,
    10524  suballocType,
    10525  strategy,
    10526  pAllocation);
    10527  if(res == VK_SUCCESS)
    10528  {
    10529  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10530  return VK_SUCCESS;
    10531  }
    10532  }
    10533  }
    10534  else // WORST_FIT, FIRST_FIT
    10535  {
    10536  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10537  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10538  {
    10539  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10540  VMA_ASSERT(pCurrBlock);
    10541  VkResult res = AllocateFromBlock(
    10542  pCurrBlock,
    10543  hCurrentPool,
    10544  currentFrameIndex,
    10545  size,
    10546  alignment,
    10547  allocFlagsCopy,
    10548  createInfo.pUserData,
    10549  suballocType,
    10550  strategy,
    10551  pAllocation);
    10552  if(res == VK_SUCCESS)
    10553  {
    10554  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10555  return VK_SUCCESS;
    10556  }
    10557  }
    10558  }
    10559  }
    10560 
    10561  // 2. Try to create new block.
    10562  if(canCreateNewBlock)
    10563  {
    10564  // Calculate optimal size for new block.
    10565  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10566  uint32_t newBlockSizeShift = 0;
    10567  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10568 
    10569  if(!m_ExplicitBlockSize)
    10570  {
    10571  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10572  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10573  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10574  {
    10575  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10576  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10577  {
    10578  newBlockSize = smallerNewBlockSize;
    10579  ++newBlockSizeShift;
    10580  }
    10581  else
    10582  {
    10583  break;
    10584  }
    10585  }
    10586  }
    10587 
    10588  size_t newBlockIndex = 0;
    10589  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10590  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10591  if(!m_ExplicitBlockSize)
    10592  {
    10593  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10594  {
    10595  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10596  if(smallerNewBlockSize >= size)
    10597  {
    10598  newBlockSize = smallerNewBlockSize;
    10599  ++newBlockSizeShift;
    10600  res = CreateBlock(newBlockSize, &newBlockIndex);
    10601  }
    10602  else
    10603  {
    10604  break;
    10605  }
    10606  }
    10607  }
    10608 
    10609  if(res == VK_SUCCESS)
    10610  {
    10611  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10612  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10613 
    10614  res = AllocateFromBlock(
    10615  pBlock,
    10616  hCurrentPool,
    10617  currentFrameIndex,
    10618  size,
    10619  alignment,
    10620  allocFlagsCopy,
    10621  createInfo.pUserData,
    10622  suballocType,
    10623  strategy,
    10624  pAllocation);
    10625  if(res == VK_SUCCESS)
    10626  {
    10627  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10628  return VK_SUCCESS;
    10629  }
    10630  else
    10631  {
    10632  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10633  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10634  }
    10635  }
    10636  }
    10637  }
    10638 
    10639  // 3. Try to allocate from existing blocks with making other allocations lost.
    10640  if(canMakeOtherLost)
    10641  {
    10642  uint32_t tryIndex = 0;
    10643  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10644  {
    10645  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10646  VmaAllocationRequest bestRequest = {};
    10647  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10648 
    10649  // 1. Search existing allocations.
    10651  {
    10652  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10653  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10654  {
    10655  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10656  VMA_ASSERT(pCurrBlock);
    10657  VmaAllocationRequest currRequest = {};
    10658  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10659  currentFrameIndex,
    10660  m_FrameInUseCount,
    10661  m_BufferImageGranularity,
    10662  size,
    10663  alignment,
    10664  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10665  suballocType,
    10666  canMakeOtherLost,
    10667  strategy,
    10668  &currRequest))
    10669  {
    10670  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10671  if(pBestRequestBlock == VMA_NULL ||
    10672  currRequestCost < bestRequestCost)
    10673  {
    10674  pBestRequestBlock = pCurrBlock;
    10675  bestRequest = currRequest;
    10676  bestRequestCost = currRequestCost;
    10677 
    10678  if(bestRequestCost == 0)
    10679  {
    10680  break;
    10681  }
    10682  }
    10683  }
    10684  }
    10685  }
    10686  else // WORST_FIT, FIRST_FIT
    10687  {
    10688  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10689  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10690  {
    10691  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10692  VMA_ASSERT(pCurrBlock);
    10693  VmaAllocationRequest currRequest = {};
    10694  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10695  currentFrameIndex,
    10696  m_FrameInUseCount,
    10697  m_BufferImageGranularity,
    10698  size,
    10699  alignment,
    10700  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10701  suballocType,
    10702  canMakeOtherLost,
    10703  strategy,
    10704  &currRequest))
    10705  {
    10706  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10707  if(pBestRequestBlock == VMA_NULL ||
    10708  currRequestCost < bestRequestCost ||
    10710  {
    10711  pBestRequestBlock = pCurrBlock;
    10712  bestRequest = currRequest;
    10713  bestRequestCost = currRequestCost;
    10714 
    10715  if(bestRequestCost == 0 ||
    10717  {
    10718  break;
    10719  }
    10720  }
    10721  }
    10722  }
    10723  }
    10724 
    10725  if(pBestRequestBlock != VMA_NULL)
    10726  {
    10727  if(mapped)
    10728  {
    10729  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10730  if(res != VK_SUCCESS)
    10731  {
    10732  return res;
    10733  }
    10734  }
    10735 
    10736  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10737  currentFrameIndex,
    10738  m_FrameInUseCount,
    10739  &bestRequest))
    10740  {
    10741  // We no longer have an empty Allocation.
    10742  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10743  {
    10744  m_HasEmptyBlock = false;
    10745  }
    10746  // Allocate from this pBlock.
    10747  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10748  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10749  (*pAllocation)->InitBlockAllocation(
    10750  hCurrentPool,
    10751  pBestRequestBlock,
    10752  bestRequest.offset,
    10753  alignment,
    10754  size,
    10755  suballocType,
    10756  mapped,
    10757  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10758  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10759  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10760  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10761  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10762  {
    10763  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10764  }
    10765  if(IsCorruptionDetectionEnabled())
    10766  {
    10767  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10768  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10769  }
    10770  return VK_SUCCESS;
    10771  }
    10772  // else: Some allocations must have been touched while we are here. Next try.
    10773  }
    10774  else
    10775  {
    10776  // Could not find place in any of the blocks - break outer loop.
    10777  break;
    10778  }
    10779  }
    10780  /* Maximum number of tries exceeded - a very unlike event when many other
    10781  threads are simultaneously touching allocations making it impossible to make
    10782  lost at the same time as we try to allocate. */
    10783  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10784  {
    10785  return VK_ERROR_TOO_MANY_OBJECTS;
    10786  }
    10787  }
    10788 
    10789  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10790 }
    10791 
    10792 void VmaBlockVector::Free(
    10793  VmaAllocation hAllocation)
    10794 {
    10795  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10796 
    10797  // Scope for lock.
    10798  {
    10799  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10800 
    10801  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10802 
    10803  if(IsCorruptionDetectionEnabled())
    10804  {
    10805  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10806  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10807  }
    10808 
    10809  if(hAllocation->IsPersistentMap())
    10810  {
    10811  pBlock->Unmap(m_hAllocator, 1);
    10812  }
    10813 
    10814  pBlock->m_pMetadata->Free(hAllocation);
    10815  VMA_HEAVY_ASSERT(pBlock->Validate());
    10816 
    10817  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10818 
    10819  // pBlock became empty after this deallocation.
    10820  if(pBlock->m_pMetadata->IsEmpty())
    10821  {
    10822  // Already has empty Allocation. We don't want to have two, so delete this one.
    10823  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10824  {
    10825  pBlockToDelete = pBlock;
    10826  Remove(pBlock);
    10827  }
    10828  // We now have first empty block.
    10829  else
    10830  {
    10831  m_HasEmptyBlock = true;
    10832  }
    10833  }
    10834  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10835  // (This is optional, heuristics.)
    10836  else if(m_HasEmptyBlock)
    10837  {
    10838  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10839  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10840  {
    10841  pBlockToDelete = pLastBlock;
    10842  m_Blocks.pop_back();
    10843  m_HasEmptyBlock = false;
    10844  }
    10845  }
    10846 
    10847  IncrementallySortBlocks();
    10848  }
    10849 
    10850  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10851  // lock, for performance reason.
    10852  if(pBlockToDelete != VMA_NULL)
    10853  {
    10854  VMA_DEBUG_LOG(" Deleted empty allocation");
    10855  pBlockToDelete->Destroy(m_hAllocator);
    10856  vma_delete(m_hAllocator, pBlockToDelete);
    10857  }
    10858 }
    10859 
    10860 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10861 {
    10862  VkDeviceSize result = 0;
    10863  for(size_t i = m_Blocks.size(); i--; )
    10864  {
    10865  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10866  if(result >= m_PreferredBlockSize)
    10867  {
    10868  break;
    10869  }
    10870  }
    10871  return result;
    10872 }
    10873 
    10874 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10875 {
    10876  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10877  {
    10878  if(m_Blocks[blockIndex] == pBlock)
    10879  {
    10880  VmaVectorRemove(m_Blocks, blockIndex);
    10881  return;
    10882  }
    10883  }
    10884  VMA_ASSERT(0);
    10885 }
    10886 
    10887 void VmaBlockVector::IncrementallySortBlocks()
    10888 {
    10889  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10890  {
    10891  // Bubble sort only until first swap.
    10892  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10893  {
    10894  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10895  {
    10896  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10897  return;
    10898  }
    10899  }
    10900  }
    10901 }
    10902 
// Attempts to place an allocation of `size`/`alignment` inside one specific
// block WITHOUT making other allocations lost. On success creates and
// initializes *pAllocation; otherwise returns VK_ERROR_OUT_OF_DEVICE_MEMORY.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    // Lost-allocation handling is done by the caller (VmaBlockVector::Allocate).
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        // Take the persistent-mapping reference before committing the
        // allocation, so a Map failure leaves the metadata untouched.
        if(mapped)
        {
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        // Optional debug aids: fill pattern and corruption-detection margins.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10977 
    10978 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10979 {
    10980  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10981  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10982  allocInfo.allocationSize = blockSize;
    10983  VkDeviceMemory mem = VK_NULL_HANDLE;
    10984  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10985  if(res < 0)
    10986  {
    10987  return res;
    10988  }
    10989 
    10990  // New VkDeviceMemory successfully created.
    10991 
    10992  // Create new Allocation for it.
    10993  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10994  pBlock->Init(
    10995  m_hAllocator,
    10996  m_MemoryTypeIndex,
    10997  mem,
    10998  allocInfo.allocationSize,
    10999  m_NextBlockId++,
    11000  m_Algorithm);
    11001 
    11002  m_Blocks.push_back(pBlock);
    11003  if(pNewBlockIndex != VMA_NULL)
    11004  {
    11005  *pNewBlockIndex = m_Blocks.size() - 1;
    11006  }
    11007 
    11008  return VK_SUCCESS;
    11009 }
    11010 
    11011 #if VMA_STATS_STRING_ENABLED
    11012 
// Serializes the state of this block vector to JSON.
// Custom pools emit their configuration (memory type, block size, block-count
// limits, frame-in-use count, algorithm); the default pool only emits
// PreferredBlockSize. Then every block's detailed map is dumped under
// "Blocks", keyed by the block's numeric id.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    // Lock so the block list cannot change while being serialized.
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max are only written when they actually constrain the pool.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // 0 means the default algorithm; omitted from the output.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    11075 
    11076 #endif // #if VMA_STATS_STRING_ENABLED
    11077 
    11078 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11079  VmaAllocator hAllocator,
    11080  uint32_t currentFrameIndex)
    11081 {
    11082  if(m_pDefragmentator == VMA_NULL)
    11083  {
    11084  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11085  hAllocator,
    11086  this,
    11087  currentFrameIndex);
    11088  }
    11089 
    11090  return m_pDefragmentator;
    11091 }
    11092 
// Runs defragmentation for this block vector using the defragmentator
// previously created by EnsureDefragmentator() (no-op otherwise).
// On return, maxBytesToMove and maxAllocationsToMove are decremented by the
// amounts actually consumed, statistics (if requested) are accumulated, and
// empty blocks above m_MinBlockCount are destroyed to reclaim device memory.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must have respected the limits it was given.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove doesn't shift
    // indices we still have to visit.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block to honor m_MinBlockCount; remember we
                // still own at least one empty block.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11149 
    11150 void VmaBlockVector::DestroyDefragmentator()
    11151 {
    11152  if(m_pDefragmentator != VMA_NULL)
    11153  {
    11154  vma_delete(m_hAllocator, m_pDefragmentator);
    11155  m_pDefragmentator = VMA_NULL;
    11156  }
    11157 }
    11158 
    11159 void VmaBlockVector::MakePoolAllocationsLost(
    11160  uint32_t currentFrameIndex,
    11161  size_t* pLostAllocationCount)
    11162 {
    11163  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11164  size_t lostAllocationCount = 0;
    11165  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11166  {
    11167  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11168  VMA_ASSERT(pBlock);
    11169  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11170  }
    11171  if(pLostAllocationCount != VMA_NULL)
    11172  {
    11173  *pLostAllocationCount = lostAllocationCount;
    11174  }
    11175 }
    11176 
    11177 VkResult VmaBlockVector::CheckCorruption()
    11178 {
    11179  if(!IsCorruptionDetectionEnabled())
    11180  {
    11181  return VK_ERROR_FEATURE_NOT_PRESENT;
    11182  }
    11183 
    11184  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11185  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11186  {
    11187  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11188  VMA_ASSERT(pBlock);
    11189  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11190  if(res != VK_SUCCESS)
    11191  {
    11192  return res;
    11193  }
    11194  }
    11195  return VK_SUCCESS;
    11196 }
    11197 
    11198 void VmaBlockVector::AddStats(VmaStats* pStats)
    11199 {
    11200  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11201  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11202 
    11203  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11204 
    11205  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11206  {
    11207  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11208  VMA_ASSERT(pBlock);
    11209  VMA_HEAVY_ASSERT(pBlock->Validate());
    11210  VmaStatInfo allocationStatInfo;
    11211  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11212  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11213  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11214  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11215  }
    11216 }
    11217 
    11219 // VmaDefragmentator members definition
    11220 
// Constructs a defragmentator bound to a single block vector.
// Both bookkeeping vectors use the allocator's custom allocation callbacks.
// Defragmentation is only supported for the default (generic) algorithm,
// hence the assert on GetAlgorithm() == 0.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11235 
    11236 VmaDefragmentator::~VmaDefragmentator()
    11237 {
    11238  for(size_t i = m_Blocks.size(); i--; )
    11239  {
    11240  vma_delete(m_hAllocator, m_Blocks[i]);
    11241  }
    11242 }
    11243 
    11244 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11245 {
    11246  AllocationInfo allocInfo;
    11247  allocInfo.m_hAllocation = hAlloc;
    11248  allocInfo.m_pChanged = pChanged;
    11249  m_Allocations.push_back(allocInfo);
    11250 }
    11251 
    11252 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11253 {
    11254  // It has already been mapped for defragmentation.
    11255  if(m_pMappedDataForDefragmentation)
    11256  {
    11257  *ppMappedData = m_pMappedDataForDefragmentation;
    11258  return VK_SUCCESS;
    11259  }
    11260 
    11261  // It is originally mapped.
    11262  if(m_pBlock->GetMappedData())
    11263  {
    11264  *ppMappedData = m_pBlock->GetMappedData();
    11265  return VK_SUCCESS;
    11266  }
    11267 
    11268  // Map on first usage.
    11269  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11270  *ppMappedData = m_pMappedDataForDefragmentation;
    11271  return res;
    11272 }
    11273 
    11274 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11275 {
    11276  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11277  {
    11278  m_pBlock->Unmap(hAllocator, 1);
    11279  }
    11280 }
    11281 
    11282 VkResult VmaDefragmentator::DefragmentRound(
    11283  VkDeviceSize maxBytesToMove,
    11284  uint32_t maxAllocationsToMove)
    11285 {
    11286  if(m_Blocks.empty())
    11287  {
    11288  return VK_SUCCESS;
    11289  }
    11290 
    11291  size_t srcBlockIndex = m_Blocks.size() - 1;
    11292  size_t srcAllocIndex = SIZE_MAX;
    11293  for(;;)
    11294  {
    11295  // 1. Find next allocation to move.
    11296  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    11297  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    11298  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    11299  {
    11300  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    11301  {
    11302  // Finished: no more allocations to process.
    11303  if(srcBlockIndex == 0)
    11304  {
    11305  return VK_SUCCESS;
    11306  }
    11307  else
    11308  {
    11309  --srcBlockIndex;
    11310  srcAllocIndex = SIZE_MAX;
    11311  }
    11312  }
    11313  else
    11314  {
    11315  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    11316  }
    11317  }
    11318 
    11319  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    11320  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    11321 
    11322  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    11323  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    11324  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    11325  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    11326 
    11327  // 2. Try to find new place for this allocation in preceding or current block.
    11328  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    11329  {
    11330  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    11331  VmaAllocationRequest dstAllocRequest;
    11332  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    11333  m_CurrentFrameIndex,
    11334  m_pBlockVector->GetFrameInUseCount(),
    11335  m_pBlockVector->GetBufferImageGranularity(),
    11336  size,
    11337  alignment,
    11338  false, // upperAddress
    11339  suballocType,
    11340  false, // canMakeOtherLost
    11342  &dstAllocRequest) &&
    11343  MoveMakesSense(
    11344  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    11345  {
    11346  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    11347 
    11348  // Reached limit on number of allocations or bytes to move.
    11349  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    11350  (m_BytesMoved + size > maxBytesToMove))
    11351  {
    11352  return VK_INCOMPLETE;
    11353  }
    11354 
    11355  void* pDstMappedData = VMA_NULL;
    11356  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    11357  if(res != VK_SUCCESS)
    11358  {
    11359  return res;
    11360  }
    11361 
    11362  void* pSrcMappedData = VMA_NULL;
    11363  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    11364  if(res != VK_SUCCESS)
    11365  {
    11366  return res;
    11367  }
    11368 
    11369  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    11370  memcpy(
    11371  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    11372  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    11373  static_cast<size_t>(size));
    11374 
    11375  if(VMA_DEBUG_MARGIN > 0)
    11376  {
    11377  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
    11378  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
    11379  }
    11380 
    11381  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    11382  dstAllocRequest,
    11383  suballocType,
    11384  size,
    11385  false, // upperAddress
    11386  allocInfo.m_hAllocation);
    11387  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    11388 
    11389  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    11390 
    11391  if(allocInfo.m_pChanged != VMA_NULL)
    11392  {
    11393  *allocInfo.m_pChanged = VK_TRUE;
    11394  }
    11395 
    11396  ++m_AllocationsMoved;
    11397  m_BytesMoved += size;
    11398 
    11399  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    11400 
    11401  break;
    11402  }
    11403  }
    11404 
    11405  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    11406 
    11407  if(srcAllocIndex > 0)
    11408  {
    11409  --srcAllocIndex;
    11410  }
    11411  else
    11412  {
    11413  if(srcBlockIndex > 0)
    11414  {
    11415  --srcBlockIndex;
    11416  srcAllocIndex = SIZE_MAX;
    11417  }
    11418  else
    11419  {
    11420  return VK_SUCCESS;
    11421  }
    11422  }
    11423  }
    11424 }
    11425 
// Runs the full defragmentation: builds a BlockInfo per device memory block,
// buckets the registered allocations into their owning blocks, sorts blocks
// from most "destination" to most "source", then executes up to 2 rounds of
// DefragmentRound() within the given byte/allocation limits.
// Returns VK_SUCCESS, VK_INCOMPLETE (a limit was hit), or a mapping error.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    // This enables the binary search used in the bucketing loop below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of this
                // vector's blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11493 
    11494 bool VmaDefragmentator::MoveMakesSense(
    11495  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11496  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11497 {
    11498  if(dstBlockIndex < srcBlockIndex)
    11499  {
    11500  return true;
    11501  }
    11502  if(dstBlockIndex > srcBlockIndex)
    11503  {
    11504  return false;
    11505  }
    11506  if(dstOffset < srcOffset)
    11507  {
    11508  return true;
    11509  }
    11510  return false;
    11511 }
    11512 
    11514 // VmaRecorder
    11515 
    11516 #if VMA_RECORDING_ENABLED
    11517 
// Default-constructs the recorder in an inactive state: no file is open and
// the performance-counter frequency/start values hold INT64_MAX sentinels
// until Init() fills them in.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11526 
// Opens the recording file given by settings.pFilePath and writes the CSV
// header. Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be
// created. NOTE(review): relies on Windows-only APIs
// (QueryPerformanceFrequency/Counter, fopen_s) - this code path appears to
// be compiled only where those are available.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Timestamps in the log are computed relative to this start counter.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    // First line: file-format magic; second line: format version.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,4");

    return VK_SUCCESS;
}
    11548 
    11549 VmaRecorder::~VmaRecorder()
    11550 {
    11551  if(m_File != VMA_NULL)
    11552  {
    11553  fclose(m_File);
    11554  }
    11555 }
    11556 
    11557 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11558 {
    11559  CallParams callParams;
    11560  GetBasicParams(callParams);
    11561 
    11562  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11563  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11564  Flush();
    11565 }
    11566 
    11567 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11568 {
    11569  CallParams callParams;
    11570  GetBasicParams(callParams);
    11571 
    11572  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11573  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11574  Flush();
    11575 }
    11576 
    11577 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    11578 {
    11579  CallParams callParams;
    11580  GetBasicParams(callParams);
    11581 
    11582  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11583  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    11584  createInfo.memoryTypeIndex,
    11585  createInfo.flags,
    11586  createInfo.blockSize,
    11587  (uint64_t)createInfo.minBlockCount,
    11588  (uint64_t)createInfo.maxBlockCount,
    11589  createInfo.frameInUseCount,
    11590  pool);
    11591  Flush();
    11592 }
    11593 
    11594 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11595 {
    11596  CallParams callParams;
    11597  GetBasicParams(callParams);
    11598 
    11599  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11600  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11601  pool);
    11602  Flush();
    11603 }
    11604 
    11605 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    11606  const VkMemoryRequirements& vkMemReq,
    11607  const VmaAllocationCreateInfo& createInfo,
    11608  VmaAllocation allocation)
    11609 {
    11610  CallParams callParams;
    11611  GetBasicParams(callParams);
    11612 
    11613  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11614  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11615  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11616  vkMemReq.size,
    11617  vkMemReq.alignment,
    11618  vkMemReq.memoryTypeBits,
    11619  createInfo.flags,
    11620  createInfo.usage,
    11621  createInfo.requiredFlags,
    11622  createInfo.preferredFlags,
    11623  createInfo.memoryTypeBits,
    11624  createInfo.pool,
    11625  allocation,
    11626  userDataStr.GetString());
    11627  Flush();
    11628 }
    11629 
    11630 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11631  const VkMemoryRequirements& vkMemReq,
    11632  bool requiresDedicatedAllocation,
    11633  bool prefersDedicatedAllocation,
    11634  const VmaAllocationCreateInfo& createInfo,
    11635  VmaAllocation allocation)
    11636 {
    11637  CallParams callParams;
    11638  GetBasicParams(callParams);
    11639 
    11640  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11641  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11642  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11643  vkMemReq.size,
    11644  vkMemReq.alignment,
    11645  vkMemReq.memoryTypeBits,
    11646  requiresDedicatedAllocation ? 1 : 0,
    11647  prefersDedicatedAllocation ? 1 : 0,
    11648  createInfo.flags,
    11649  createInfo.usage,
    11650  createInfo.requiredFlags,
    11651  createInfo.preferredFlags,
    11652  createInfo.memoryTypeBits,
    11653  createInfo.pool,
    11654  allocation,
    11655  userDataStr.GetString());
    11656  Flush();
    11657 }
    11658 
    11659 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    11660  const VkMemoryRequirements& vkMemReq,
    11661  bool requiresDedicatedAllocation,
    11662  bool prefersDedicatedAllocation,
    11663  const VmaAllocationCreateInfo& createInfo,
    11664  VmaAllocation allocation)
    11665 {
    11666  CallParams callParams;
    11667  GetBasicParams(callParams);
    11668 
    11669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11670  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11671  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11672  vkMemReq.size,
    11673  vkMemReq.alignment,
    11674  vkMemReq.memoryTypeBits,
    11675  requiresDedicatedAllocation ? 1 : 0,
    11676  prefersDedicatedAllocation ? 1 : 0,
    11677  createInfo.flags,
    11678  createInfo.usage,
    11679  createInfo.requiredFlags,
    11680  createInfo.preferredFlags,
    11681  createInfo.memoryTypeBits,
    11682  createInfo.pool,
    11683  allocation,
    11684  userDataStr.GetString());
    11685  Flush();
    11686 }
    11687 
    11688 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11689  VmaAllocation allocation)
    11690 {
    11691  CallParams callParams;
    11692  GetBasicParams(callParams);
    11693 
    11694  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11695  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11696  allocation);
    11697  Flush();
    11698 }
    11699 
    11700 void VmaRecorder::RecordResizeAllocation(
    11701  uint32_t frameIndex,
    11702  VmaAllocation allocation,
    11703  VkDeviceSize newSize)
    11704 {
    11705  CallParams callParams;
    11706  GetBasicParams(callParams);
    11707 
    11708  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11709  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11710  allocation, newSize);
    11711  Flush();
    11712 }
    11713 
    11714 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11715  VmaAllocation allocation,
    11716  const void* pUserData)
    11717 {
    11718  CallParams callParams;
    11719  GetBasicParams(callParams);
    11720 
    11721  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11722  UserDataString userDataStr(
    11723  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11724  pUserData);
    11725  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11726  allocation,
    11727  userDataStr.GetString());
    11728  Flush();
    11729 }
    11730 
    11731 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11732  VmaAllocation allocation)
    11733 {
    11734  CallParams callParams;
    11735  GetBasicParams(callParams);
    11736 
    11737  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11738  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11739  allocation);
    11740  Flush();
    11741 }
    11742 
    11743 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11744  VmaAllocation allocation)
    11745 {
    11746  CallParams callParams;
    11747  GetBasicParams(callParams);
    11748 
    11749  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11750  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11751  allocation);
    11752  Flush();
    11753 }
    11754 
    11755 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11756  VmaAllocation allocation)
    11757 {
    11758  CallParams callParams;
    11759  GetBasicParams(callParams);
    11760 
    11761  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11762  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11763  allocation);
    11764  Flush();
    11765 }
    11766 
    11767 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11768  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11769 {
    11770  CallParams callParams;
    11771  GetBasicParams(callParams);
    11772 
    11773  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11774  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11775  allocation,
    11776  offset,
    11777  size);
    11778  Flush();
    11779 }
    11780 
    11781 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11782  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11783 {
    11784  CallParams callParams;
    11785  GetBasicParams(callParams);
    11786 
    11787  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11788  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11789  allocation,
    11790  offset,
    11791  size);
    11792  Flush();
    11793 }
    11794 
    11795 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11796  const VkBufferCreateInfo& bufCreateInfo,
    11797  const VmaAllocationCreateInfo& allocCreateInfo,
    11798  VmaAllocation allocation)
    11799 {
    11800  CallParams callParams;
    11801  GetBasicParams(callParams);
    11802 
    11803  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11804  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11805  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11806  bufCreateInfo.flags,
    11807  bufCreateInfo.size,
    11808  bufCreateInfo.usage,
    11809  bufCreateInfo.sharingMode,
    11810  allocCreateInfo.flags,
    11811  allocCreateInfo.usage,
    11812  allocCreateInfo.requiredFlags,
    11813  allocCreateInfo.preferredFlags,
    11814  allocCreateInfo.memoryTypeBits,
    11815  allocCreateInfo.pool,
    11816  allocation,
    11817  userDataStr.GetString());
    11818  Flush();
    11819 }
    11820 
    11821 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    11822  const VkImageCreateInfo& imageCreateInfo,
    11823  const VmaAllocationCreateInfo& allocCreateInfo,
    11824  VmaAllocation allocation)
    11825 {
    11826  CallParams callParams;
    11827  GetBasicParams(callParams);
    11828 
    11829  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11830  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11831  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11832  imageCreateInfo.flags,
    11833  imageCreateInfo.imageType,
    11834  imageCreateInfo.format,
    11835  imageCreateInfo.extent.width,
    11836  imageCreateInfo.extent.height,
    11837  imageCreateInfo.extent.depth,
    11838  imageCreateInfo.mipLevels,
    11839  imageCreateInfo.arrayLayers,
    11840  imageCreateInfo.samples,
    11841  imageCreateInfo.tiling,
    11842  imageCreateInfo.usage,
    11843  imageCreateInfo.sharingMode,
    11844  imageCreateInfo.initialLayout,
    11845  allocCreateInfo.flags,
    11846  allocCreateInfo.usage,
    11847  allocCreateInfo.requiredFlags,
    11848  allocCreateInfo.preferredFlags,
    11849  allocCreateInfo.memoryTypeBits,
    11850  allocCreateInfo.pool,
    11851  allocation,
    11852  userDataStr.GetString());
    11853  Flush();
    11854 }
    11855 
    11856 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11857  VmaAllocation allocation)
    11858 {
    11859  CallParams callParams;
    11860  GetBasicParams(callParams);
    11861 
    11862  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11863  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11864  allocation);
    11865  Flush();
    11866 }
    11867 
    11868 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11869  VmaAllocation allocation)
    11870 {
    11871  CallParams callParams;
    11872  GetBasicParams(callParams);
    11873 
    11874  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11875  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11876  allocation);
    11877  Flush();
    11878 }
    11879 
    11880 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11881  VmaAllocation allocation)
    11882 {
    11883  CallParams callParams;
    11884  GetBasicParams(callParams);
    11885 
    11886  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11887  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11888  allocation);
    11889  Flush();
    11890 }
    11891 
    11892 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11893  VmaAllocation allocation)
    11894 {
    11895  CallParams callParams;
    11896  GetBasicParams(callParams);
    11897 
    11898  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11899  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11900  allocation);
    11901  Flush();
    11902 }
    11903 
    11904 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11905  VmaPool pool)
    11906 {
    11907  CallParams callParams;
    11908  GetBasicParams(callParams);
    11909 
    11910  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11911  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11912  pool);
    11913  Flush();
    11914 }
    11915 
    11916 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11917 {
    11918  if(pUserData != VMA_NULL)
    11919  {
    11920  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11921  {
    11922  m_Str = (const char*)pUserData;
    11923  }
    11924  else
    11925  {
    11926  sprintf_s(m_PtrStr, "%p", pUserData);
    11927  m_Str = m_PtrStr;
    11928  }
    11929  }
    11930  else
    11931  {
    11932  m_Str = "";
    11933  }
    11934 }
    11935 
// Writes a "Config" section at the beginning of the recording file, describing
// the physical device, memory heaps/types, enabled extensions, and the values
// of compile-time VMA_* macros. The exact line order is part of the recording
// file format, so statements below must not be reordered.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Basic physical device identification.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Device limits that affect allocation behavior.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps: size and flags for each.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    // Memory types: heap index and property flags for each.
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration macros, so the recording can be replayed
    // under a matching build configuration.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11981 
    11982 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11983 {
    11984  outParams.threadId = GetCurrentThreadId();
    11985 
    11986  LARGE_INTEGER counter;
    11987  QueryPerformanceCounter(&counter);
    11988  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11989 }
    11990 
    11991 void VmaRecorder::Flush()
    11992 {
    11993  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11994  {
    11995  fflush(m_File);
    11996  }
    11997 }
    11998 
    11999 #endif // #if VMA_RECORDING_ENABLED
    12000 
    12002 // VmaAllocator_T
    12003 
    12004 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12005  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12006  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12007  m_hDevice(pCreateInfo->device),
    12008  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12009  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12010  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12011  m_PreferredLargeHeapBlockSize(0),
    12012  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12013  m_CurrentFrameIndex(0),
    12014  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12015  m_NextPoolId(0)
    12017  ,m_pRecorder(VMA_NULL)
    12018 #endif
    12019 {
    12020  if(VMA_DEBUG_DETECT_CORRUPTION)
    12021  {
    12022  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12023  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12024  }
    12025 
    12026  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12027 
    12028 #if !(VMA_DEDICATED_ALLOCATION)
    12030  {
    12031  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12032  }
    12033 #endif
    12034 
    12035  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12036  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12037  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12038 
    12039  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12040  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12041 
    12042  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12043  {
    12044  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12045  }
    12046 
    12047  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12048  {
    12049  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12050  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12051  }
    12052 
    12053  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12054 
    12055  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12056  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12057 
    12058  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12059  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12060  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12061  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12062 
    12063  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12064  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12065 
    12066  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12067  {
    12068  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12069  {
    12070  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12071  if(limit != VK_WHOLE_SIZE)
    12072  {
    12073  m_HeapSizeLimit[heapIndex] = limit;
    12074  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12075  {
    12076  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12077  }
    12078  }
    12079  }
    12080  }
    12081 
    12082  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12083  {
    12084  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12085 
    12086  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12087  this,
    12088  memTypeIndex,
    12089  preferredBlockSize,
    12090  0,
    12091  SIZE_MAX,
    12092  GetBufferImageGranularity(),
    12093  pCreateInfo->frameInUseCount,
    12094  false, // isCustomPool
    12095  false, // explicitBlockSize
    12096  false); // linearAlgorithm
    12097  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12098  // becase minBlockCount is 0.
    12099  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12100 
    12101  }
    12102 }
    12103 
    12104 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    12105 {
    12106  VkResult res = VK_SUCCESS;
    12107 
    12108  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    12109  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    12110  {
    12111 #if VMA_RECORDING_ENABLED
    12112  m_pRecorder = vma_new(this, VmaRecorder)();
    12113  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    12114  if(res != VK_SUCCESS)
    12115  {
    12116  return res;
    12117  }
    12118  m_pRecorder->WriteConfiguration(
    12119  m_PhysicalDeviceProperties,
    12120  m_MemProps,
    12121  m_UseKhrDedicatedAllocation);
    12122  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    12123 #else
    12124  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    12125  return VK_ERROR_FEATURE_NOT_PRESENT;
    12126 #endif
    12127  }
    12128 
    12129  return res;
    12130 }
    12131 
    12132 VmaAllocator_T::~VmaAllocator_T()
    12133 {
    12134 #if VMA_RECORDING_ENABLED
    12135  if(m_pRecorder != VMA_NULL)
    12136  {
    12137  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    12138  vma_delete(this, m_pRecorder);
    12139  }
    12140 #endif
    12141 
    12142  VMA_ASSERT(m_Pools.empty());
    12143 
    12144  for(size_t i = GetMemoryTypeCount(); i--; )
    12145  {
    12146  vma_delete(this, m_pDedicatedAllocations[i]);
    12147  vma_delete(this, m_pBlockVectors[i]);
    12148  }
    12149 }
    12150 
// Fills m_VulkanFunctions with pointers to the Vulkan entry points the library
// needs. Sources, in order: statically linked functions (when
// VMA_STATIC_VULKAN_FUNCTIONS == 1), then any non-null pointers supplied by
// the user in pVulkanFunctions, which override the static ones. Asserts at the
// end that every required pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    // Take addresses of the statically linked Vulkan loader functions.
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // Extension functions are not exported statically; fetch them per-device.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Helper: copy a user-supplied pointer only when it is non-null, so user
// pointers override the static ones without erasing them with nulls.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    // Required only when the dedicated-allocation extension is in use.
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12236 
    12237 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12238 {
    12239  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12240  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12241  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12242  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12243 }
    12244 
    12245 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12246  VkDeviceSize size,
    12247  VkDeviceSize alignment,
    12248  bool dedicatedAllocation,
    12249  VkBuffer dedicatedBuffer,
    12250  VkImage dedicatedImage,
    12251  const VmaAllocationCreateInfo& createInfo,
    12252  uint32_t memTypeIndex,
    12253  VmaSuballocationType suballocType,
    12254  VmaAllocation* pAllocation)
    12255 {
    12256  VMA_ASSERT(pAllocation != VMA_NULL);
    12257  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12258 
    12259  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12260 
    12261  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12262  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12263  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12264  {
    12265  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12266  }
    12267 
    12268  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12269  VMA_ASSERT(blockVector);
    12270 
    12271  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12272  bool preferDedicatedMemory =
    12273  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12274  dedicatedAllocation ||
    12275  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12276  size > preferredBlockSize / 2;
    12277 
    12278  if(preferDedicatedMemory &&
    12279  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12280  finalCreateInfo.pool == VK_NULL_HANDLE)
    12281  {
    12283  }
    12284 
    12285  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12286  {
    12287  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12288  {
    12289  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12290  }
    12291  else
    12292  {
    12293  return AllocateDedicatedMemory(
    12294  size,
    12295  suballocType,
    12296  memTypeIndex,
    12297  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12298  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12299  finalCreateInfo.pUserData,
    12300  dedicatedBuffer,
    12301  dedicatedImage,
    12302  pAllocation);
    12303  }
    12304  }
    12305  else
    12306  {
    12307  VkResult res = blockVector->Allocate(
    12308  VK_NULL_HANDLE, // hCurrentPool
    12309  m_CurrentFrameIndex.load(),
    12310  size,
    12311  alignment,
    12312  finalCreateInfo,
    12313  suballocType,
    12314  pAllocation);
    12315  if(res == VK_SUCCESS)
    12316  {
    12317  return res;
    12318  }
    12319 
    12320  // 5. Try dedicated memory.
    12321  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12322  {
    12323  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12324  }
    12325  else
    12326  {
    12327  res = AllocateDedicatedMemory(
    12328  size,
    12329  suballocType,
    12330  memTypeIndex,
    12331  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12332  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12333  finalCreateInfo.pUserData,
    12334  dedicatedBuffer,
    12335  dedicatedImage,
    12336  pAllocation);
    12337  if(res == VK_SUCCESS)
    12338  {
    12339  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12340  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12341  return VK_SUCCESS;
    12342  }
    12343  else
    12344  {
    12345  // Everything failed: Return error code.
    12346  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12347  return res;
    12348  }
    12349  }
    12350  }
    12351 }
    12352 
// Makes a standalone vkAllocateMemory call for a single resource (no
// suballocation), optionally persistently maps it, and registers the result
// in m_pDedicatedAllocations[memTypeIndex]. On mapping failure the freshly
// allocated memory is released before returning the error.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain VkMemoryDedicatedAllocateInfoKHR when the extension is enabled and
    // a specific buffer or image was given (at most one of the two).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Optionally map the whole allocation persistently.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the memory we just allocated to avoid a leak.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12435 
// Queries memory requirements for a buffer. When the dedicated-allocation
// extension is in use, also reports whether the driver requires/prefers a
// dedicated allocation via VkMemoryDedicatedRequirementsKHR; otherwise both
// flags are false. Note the `else` below pairs with the `if` across #endif.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements struct to receive the extra info.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        // Plain Vulkan 1.0 query; no dedicated-allocation information available.
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12467 
// Queries memory requirements for an image. Mirrors
// GetBufferMemoryRequirements: uses vkGetImageMemoryRequirements2KHR with
// VkMemoryDedicatedRequirementsKHR when the extension is in use, otherwise
// falls back to the core query with both dedicated flags reported false.
// Note the `else` below pairs with the `if` across #endif.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements struct to receive the extra info.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation  != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        // Plain Vulkan 1.0 query; no dedicated-allocation information available.
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12499 
    12500 VkResult VmaAllocator_T::AllocateMemory(
    12501  const VkMemoryRequirements& vkMemReq,
    12502  bool requiresDedicatedAllocation,
    12503  bool prefersDedicatedAllocation,
    12504  VkBuffer dedicatedBuffer,
    12505  VkImage dedicatedImage,
    12506  const VmaAllocationCreateInfo& createInfo,
    12507  VmaSuballocationType suballocType,
    12508  VmaAllocation* pAllocation)
    12509 {
    12510  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12511 
    12512  if(vkMemReq.size == 0)
    12513  {
    12514  return VK_ERROR_VALIDATION_FAILED_EXT;
    12515  }
    12516  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12517  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12518  {
    12519  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12520  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12521  }
    12522  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12524  {
    12525  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12526  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12527  }
    12528  if(requiresDedicatedAllocation)
    12529  {
    12530  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12531  {
    12532  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12533  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12534  }
    12535  if(createInfo.pool != VK_NULL_HANDLE)
    12536  {
    12537  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12538  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12539  }
    12540  }
    12541  if((createInfo.pool != VK_NULL_HANDLE) &&
    12542  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12543  {
    12544  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12545  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12546  }
    12547 
    12548  if(createInfo.pool != VK_NULL_HANDLE)
    12549  {
    12550  const VkDeviceSize alignmentForPool = VMA_MAX(
    12551  vkMemReq.alignment,
    12552  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12553  return createInfo.pool->m_BlockVector.Allocate(
    12554  createInfo.pool,
    12555  m_CurrentFrameIndex.load(),
    12556  vkMemReq.size,
    12557  alignmentForPool,
    12558  createInfo,
    12559  suballocType,
    12560  pAllocation);
    12561  }
    12562  else
    12563  {
    12564  // Bit mask of memory Vulkan types acceptable for this allocation.
    12565  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12566  uint32_t memTypeIndex = UINT32_MAX;
    12567  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12568  if(res == VK_SUCCESS)
    12569  {
    12570  VkDeviceSize alignmentForMemType = VMA_MAX(
    12571  vkMemReq.alignment,
    12572  GetMemoryTypeMinAlignment(memTypeIndex));
    12573 
    12574  res = AllocateMemoryOfType(
    12575  vkMemReq.size,
    12576  alignmentForMemType,
    12577  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12578  dedicatedBuffer,
    12579  dedicatedImage,
    12580  createInfo,
    12581  memTypeIndex,
    12582  suballocType,
    12583  pAllocation);
    12584  // Succeeded on first try.
    12585  if(res == VK_SUCCESS)
    12586  {
    12587  return res;
    12588  }
    12589  // Allocation from this memory type failed. Try other compatible memory types.
    12590  else
    12591  {
    12592  for(;;)
    12593  {
    12594  // Remove old memTypeIndex from list of possibilities.
    12595  memoryTypeBits &= ~(1u << memTypeIndex);
    12596  // Find alternative memTypeIndex.
    12597  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12598  if(res == VK_SUCCESS)
    12599  {
    12600  alignmentForMemType = VMA_MAX(
    12601  vkMemReq.alignment,
    12602  GetMemoryTypeMinAlignment(memTypeIndex));
    12603 
    12604  res = AllocateMemoryOfType(
    12605  vkMemReq.size,
    12606  alignmentForMemType,
    12607  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12608  dedicatedBuffer,
    12609  dedicatedImage,
    12610  createInfo,
    12611  memTypeIndex,
    12612  suballocType,
    12613  pAllocation);
    12614  // Allocation from this alternative memory type succeeded.
    12615  if(res == VK_SUCCESS)
    12616  {
    12617  return res;
    12618  }
    12619  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12620  }
    12621  // No other matching memory type index could be found.
    12622  else
    12623  {
    12624  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12625  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12626  }
    12627  }
    12628  }
    12629  }
    12630  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12631  else
    12632  return res;
    12633  }
    12634 }
    12635 
// Frees the given allocation: returns its memory to the owning block vector or
// frees the dedicated VkDeviceMemory, then destroys the allocation object itself.
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    // TouchAllocation() returns false when the allocation is already lost;
    // in that case there is no backing memory to release, only the handle below.
    if(TouchAllocation(allocation))
    {
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Overwrite the memory with the "destroyed" pattern to help catch use-after-free.
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Suballocation inside a block: return it to the owning block vector -
                // the custom pool's vector if any, else the default one for its memory type.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Always destroy the allocation object itself, even if it was lost.
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
    12676 
// Tries to change the size of an existing allocation in place.
// Returns VK_SUCCESS on success or no-op, VK_ERROR_OUT_OF_POOL_MEMORY if the
// block cannot accommodate the new size, VK_ERROR_FEATURE_NOT_PRESENT for
// dedicated allocations, VK_ERROR_VALIDATION_FAILED_EXT for invalid input.
VkResult VmaAllocator_T::ResizeAllocation(
    const VmaAllocation alloc,
    VkDeviceSize newSize)
{
    // Zero size or a lost allocation cannot be resized.
    if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    // Same size: nothing to do.
    if(newSize == alloc->GetSize())
    {
        return VK_SUCCESS;
    }

    switch(alloc->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        // A dedicated allocation owns a whole VkDeviceMemory - cannot be resized.
        return VK_ERROR_FEATURE_NOT_PRESENT;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        // Ask the block's metadata whether the suballocation can grow/shrink in place.
        if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
        {
            alloc->ChangeSize(newSize);
            VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
            return VK_SUCCESS;
        }
        else
        {
            return VK_ERROR_OUT_OF_POOL_MEMORY;
        }
    default:
        VMA_ASSERT(0);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
}
    12710 
// Aggregates memory statistics across the whole allocator - default block
// vectors, custom pools and dedicated allocations - broken down per memory
// type and per memory heap, plus a grand total.
void VmaAllocator_T::CalculateStats(VmaStats* pStats)
{
    // Initialize.
    InitStatInfo(pStats->total);
    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
        InitStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        InitStatInfo(pStats->memoryHeap[i]);

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
        VMA_ASSERT(pBlockVector);
        pBlockVector->AddStats(pStats);
    }

    // Process custom pools (under the pools mutex).
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
        }
    }

    // Process dedicated allocations (per-memory-type mutex each iteration).
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
        {
            VmaStatInfo allocationStatInfo;
            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
            // Each dedicated allocation contributes to the total, its type, and its heap.
            VmaAddStatInfo(pStats->total, allocationStatInfo);
            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
        }
    }

    // Postprocess: compute derived values (e.g. averages) from the accumulated sums.
    VmaPostprocessCalcStatInfo(pStats->total);
    for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
        VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
}
    12761 
    12762 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12763 
    12764 VkResult VmaAllocator_T::Defragment(
    12765  VmaAllocation* pAllocations,
    12766  size_t allocationCount,
    12767  VkBool32* pAllocationsChanged,
    12768  const VmaDefragmentationInfo* pDefragmentationInfo,
    12769  VmaDefragmentationStats* pDefragmentationStats)
    12770 {
    12771  if(pAllocationsChanged != VMA_NULL)
    12772  {
    12773  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    12774  }
    12775  if(pDefragmentationStats != VMA_NULL)
    12776  {
    12777  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12778  }
    12779 
    12780  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12781 
    12782  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12783 
    12784  const size_t poolCount = m_Pools.size();
    12785 
    12786  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12787  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12788  {
    12789  VmaAllocation hAlloc = pAllocations[allocIndex];
    12790  VMA_ASSERT(hAlloc);
    12791  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12792  // DedicatedAlloc cannot be defragmented.
    12793  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12794  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12795  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12796  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12797  // Lost allocation cannot be defragmented.
    12798  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12799  {
    12800  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12801 
    12802  const VmaPool hAllocPool = hAlloc->GetPool();
    12803  // This allocation belongs to custom pool.
    12804  if(hAllocPool != VK_NULL_HANDLE)
    12805  {
    12806  // Pools with linear or buddy algorithm are not defragmented.
    12807  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12808  {
    12809  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12810  }
    12811  }
    12812  // This allocation belongs to general pool.
    12813  else
    12814  {
    12815  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12816  }
    12817 
    12818  if(pAllocBlockVector != VMA_NULL)
    12819  {
    12820  VmaDefragmentator* const pDefragmentator =
    12821  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12822  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12823  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12824  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12825  }
    12826  }
    12827  }
    12828 
    12829  VkResult result = VK_SUCCESS;
    12830 
    12831  // ======== Main processing.
    12832 
    12833  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12834  uint32_t maxAllocationsToMove = UINT32_MAX;
    12835  if(pDefragmentationInfo != VMA_NULL)
    12836  {
    12837  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12838  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12839  }
    12840 
    12841  // Process standard memory.
    12842  for(uint32_t memTypeIndex = 0;
    12843  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12844  ++memTypeIndex)
    12845  {
    12846  // Only HOST_VISIBLE memory types can be defragmented.
    12847  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12848  {
    12849  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12850  pDefragmentationStats,
    12851  maxBytesToMove,
    12852  maxAllocationsToMove);
    12853  }
    12854  }
    12855 
    12856  // Process custom pools.
    12857  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12858  {
    12859  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12860  pDefragmentationStats,
    12861  maxBytesToMove,
    12862  maxAllocationsToMove);
    12863  }
    12864 
    12865  // ======== Destroy defragmentators.
    12866 
    12867  // Process custom pools.
    12868  for(size_t poolIndex = poolCount; poolIndex--; )
    12869  {
    12870  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12871  }
    12872 
    12873  // Process standard memory.
    12874  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12875  {
    12876  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12877  {
    12878  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12879  }
    12880  }
    12881 
    12882  return result;
    12883 }
    12884 
// Fills *pAllocationInfo with the current parameters of hAllocation.
// For allocations that can become lost this also "touches" the allocation,
// advancing its last-use frame index to the current frame via a CAS loop,
// so that observing the allocation also keeps it alive for this frame.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report neutral values, keeping only size and user data.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report real parameters.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump the last-use frame index; on failure re-read and retry.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Only for statistics: also record the last-use frame for non-lost-capable allocations.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12956 
// "Touches" the allocation: marks it as used in the current frame so the
// lost-allocation logic won't reclaim it. Returns false if the allocation is
// already lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // CAS loop: advance last-use frame index to the current frame, or bail
        // out if the allocation is lost or already up to date.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Only for statistics: keep the last-use frame index current.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    13008 
    13009 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13010 {
    13011  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13012 
    13013  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13014 
    13015  if(newCreateInfo.maxBlockCount == 0)
    13016  {
    13017  newCreateInfo.maxBlockCount = SIZE_MAX;
    13018  }
    13019  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13020  {
    13021  return VK_ERROR_INITIALIZATION_FAILED;
    13022  }
    13023 
    13024  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13025 
    13026  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13027 
    13028  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13029  if(res != VK_SUCCESS)
    13030  {
    13031  vma_delete(this, *pPool);
    13032  *pPool = VMA_NULL;
    13033  return res;
    13034  }
    13035 
    13036  // Add to m_Pools.
    13037  {
    13038  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13039  (*pPool)->SetId(m_NextPoolId++);
    13040  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13041  }
    13042 
    13043  return VK_SUCCESS;
    13044 }
    13045 
    13046 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13047 {
    13048  // Remove from m_Pools.
    13049  {
    13050  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13051  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13052  VMA_ASSERT(success && "Pool not found in Allocator.");
    13053  }
    13054 
    13055  vma_delete(this, pool);
    13056 }
    13057 
// Retrieves statistics of a custom pool by forwarding to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13062 
// Atomically publishes the new frame index used by the lost-allocation logic.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    13067 
// Marks eligible allocations in hPool as lost, relative to the current frame
// index. Optionally reports how many were lost via pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    13076 
// Validates corruption-detection margins of a single custom pool.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    13081 
// Validates corruption-detection margins of all block vectors whose memory
// type is included in memoryTypeBits. Result semantics:
// - VK_SUCCESS: at least one vector supported the check and none found corruption.
// - VK_ERROR_FEATURE_NOT_PRESENT: no checked vector supports corruption detection.
// - Any other error: corruption detected; returned immediately.
VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
{
    VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;

    // Process default pools.
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        if(((1u << memTypeIndex) & memoryTypeBits) != 0)
        {
            VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
            VMA_ASSERT(pBlockVector);
            VkResult localRes = pBlockVector->CheckCorruption();
            switch(localRes)
            {
            case VK_ERROR_FEATURE_NOT_PRESENT:
                // This vector can't be checked - doesn't affect the aggregate result.
                break;
            case VK_SUCCESS:
                finalRes = VK_SUCCESS;
                break;
            default:
                // Corruption detected - fail fast.
                return localRes;
            }
        }
    }

    // Process custom pools.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
        {
            if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
            {
                VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
                switch(localRes)
                {
                case VK_ERROR_FEATURE_NOT_PRESENT:
                    break;
                case VK_SUCCESS:
                    finalRes = VK_SUCCESS;
                    break;
                default:
                    return localRes;
                }
            }
        }
    }

    return finalRes;
}
    13131 
// Creates a dummy allocation object that is already in the "lost" state,
// with no backing memory - used as a placeholder by the lost-allocation API.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    13137 
// Calls vkAllocateMemory, honoring the optional user-defined per-heap size
// limit and invoking the user's pfnAllocate device-memory callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    // VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit set for this heap".
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Check-and-subtract must be atomic with respect to other allocations,
        // hence the mutex around the remaining-budget bookkeeping.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Allocation would exceed the user-imposed heap budget.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Notify the user's callback about the successful device memory allocation.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    13171 
    13172 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    13173 {
    13174  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    13175  {
    13176  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    13177  }
    13178 
    13179  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    13180 
    13181  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    13182  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13183  {
    13184  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13185  m_HeapSizeLimit[heapIndex] += size;
    13186  }
    13187 }
    13188 
// Maps the allocation's memory and returns the pointer in *ppData.
// Allocations that can become lost cannot be mapped.
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // Map the whole block (ref-counted inside pBlock), then offset the
            // returned pointer to this suballocation's position.
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                // Track the per-allocation map count.
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
    13217 
    13218 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13219 {
    13220  switch(hAllocation->GetType())
    13221  {
    13222  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13223  {
    13224  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13225  hAllocation->BlockAllocUnmap();
    13226  pBlock->Unmap(this, 1);
    13227  }
    13228  break;
    13229  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13230  hAllocation->DedicatedAllocUnmap(this);
    13231  break;
    13232  default:
    13233  VMA_ASSERT(0);
    13234  }
    13235 }
    13236 
    13237 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13238 {
    13239  VkResult res = VK_SUCCESS;
    13240  switch(hAllocation->GetType())
    13241  {
    13242  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13243  res = GetVulkanFunctions().vkBindBufferMemory(
    13244  m_hDevice,
    13245  hBuffer,
    13246  hAllocation->GetMemory(),
    13247  0); //memoryOffset
    13248  break;
    13249  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13250  {
    13251  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13252  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13253  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13254  break;
    13255  }
    13256  default:
    13257  VMA_ASSERT(0);
    13258  }
    13259  return res;
    13260 }
    13261 
    13262 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13263 {
    13264  VkResult res = VK_SUCCESS;
    13265  switch(hAllocation->GetType())
    13266  {
    13267  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13268  res = GetVulkanFunctions().vkBindImageMemory(
    13269  m_hDevice,
    13270  hImage,
    13271  hAllocation->GetMemory(),
    13272  0); //memoryOffset
    13273  break;
    13274  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13275  {
    13276  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13277  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13278  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13279  break;
    13280  }
    13281  default:
    13282  VMA_ASSERT(0);
    13283  }
    13284  return res;
    13285 }
    13286 
// Flushes or invalidates (per op) the host cache for the given range of the
// allocation. A no-op for coherent memory types or size == 0. The range is
// expanded to nonCoherentAtomSize boundaries, as the Vulkan spec requires for
// VkMappedMemoryRange, and clamped to the allocation/block size.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align start down to an atom boundary; offset is relative to the
            // whole VkDeviceMemory since a dedicated allocation owns it all.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Grow the size to cover the alignment slack at the start,
                // align it up, then clamp to the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // 1. Still within this allocation.
                memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
                if(size == VK_WHOLE_SIZE)
                {
                    size = allocationSize - offset;
                }
                else
                {
                    VMA_ASSERT(offset + size <= allocationSize);
                }
                memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

                // 2. Adjust to whole block.
                const VkDeviceSize allocationOffset = hAllocation->GetOffset();
                // Suballocation offsets are expected to already be atom-aligned.
                VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
                const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
                memRange.offset += allocationOffset;
                memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

                break;
            }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13362 
// Frees a dedicated allocation: removes it from the per-memory-type registry
// and releases its VkDeviceMemory. The allocation object itself is destroyed
// by the caller (FreeMemory).
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Unregister under the per-memory-type mutex.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    13392 
    13393 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13394 {
    13395  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13396  !hAllocation->CanBecomeLost() &&
    13397  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13398  {
    13399  void* pData = VMA_NULL;
    13400  VkResult res = Map(hAllocation, &pData);
    13401  if(res == VK_SUCCESS)
    13402  {
    13403  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13404  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13405  Unmap(hAllocation);
    13406  }
    13407  else
    13408  {
    13409  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13410  }
    13411  }
    13412 }
    13413 
    13414 #if VMA_STATS_STRING_ENABLED
    13415 
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Writes up to three optional sections into the caller's open JSON object:
    // "DedicatedAllocations", "DefaultPools" and "Pools". Each section is
    // opened lazily, only once the first non-empty entry is found.

    // Section 1: dedicated allocations, grouped per memory type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                // First non-empty memory type: open the section.
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Section 2: default (non-custom-pool) block vectors, per memory type.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Section 3: custom pools, keyed by pool ID.
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13501 
    13502 #endif // #if VMA_STATS_STRING_ENABLED
    13503 
    13505 // Public interface
    13506 
    13507 VkResult vmaCreateAllocator(
    13508  const VmaAllocatorCreateInfo* pCreateInfo,
    13509  VmaAllocator* pAllocator)
    13510 {
    13511  VMA_ASSERT(pCreateInfo && pAllocator);
    13512  VMA_DEBUG_LOG("vmaCreateAllocator");
    13513  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    13514  return (*pAllocator)->Init(pCreateInfo);
    13515 }
    13516 
    13517 void vmaDestroyAllocator(
    13518  VmaAllocator allocator)
    13519 {
    13520  if(allocator != VK_NULL_HANDLE)
    13521  {
    13522  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13523  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13524  vma_delete(&allocationCallbacks, allocator);
    13525  }
    13526 }
    13527 
    13529  VmaAllocator allocator,
    13530  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13531 {
    13532  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13533  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13534 }
    13535 
    13537  VmaAllocator allocator,
    13538  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13539 {
    13540  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13541  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13542 }
    13543 
    13545  VmaAllocator allocator,
    13546  uint32_t memoryTypeIndex,
    13547  VkMemoryPropertyFlags* pFlags)
    13548 {
    13549  VMA_ASSERT(allocator && pFlags);
    13550  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13551  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13552 }
    13553 
    13555  VmaAllocator allocator,
    13556  uint32_t frameIndex)
    13557 {
    13558  VMA_ASSERT(allocator);
    13559  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13560 
    13561  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13562 
    13563  allocator->SetCurrentFrameIndex(frameIndex);
    13564 }
    13565 
// Fills *pStats with usage statistics aggregated over the whole allocator.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13574 
    13575 #if VMA_STATS_STRING_ENABLED
    13576 
// Builds a JSON statistics string describing the allocator's current state.
// The returned buffer is allocated via vma_new_array and must be released
// with vmaFreeStatsString. When detailedMap is VK_TRUE the output also
// includes the per-allocation detailed map (PrintDetailedMap).
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // Grand total across all heaps and types.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, with nested objects per memory type.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // "Stats" is only emitted for heaps that actually hold blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                // Only the types belonging to this heap.
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's contents into a NUL-terminated buffer for the caller.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13684 
    13685 void vmaFreeStatsString(
    13686  VmaAllocator allocator,
    13687  char* pStatsString)
    13688 {
    13689  if(pStatsString != VMA_NULL)
    13690  {
    13691  VMA_ASSERT(allocator);
    13692  size_t len = strlen(pStatsString);
    13693  vma_delete_array(allocator, pStatsString, len + 1);
    13694  }
    13695 }
    13696 
    13697 #endif // #if VMA_STATS_STRING_ENABLED
    13698 
    13699 /*
    13700 This function is not protected by any mutex because it just reads immutable data.
    13701 */
    13702 VkResult vmaFindMemoryTypeIndex(
    13703  VmaAllocator allocator,
    13704  uint32_t memoryTypeBits,
    13705  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13706  uint32_t* pMemoryTypeIndex)
    13707 {
    13708  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13709  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13710  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13711 
    13712  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13713  {
    13714  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13715  }
    13716 
    13717  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13718  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13719 
    13720  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13721  if(mapped)
    13722  {
    13723  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13724  }
    13725 
    13726  // Convert usage to requiredFlags and preferredFlags.
    13727  switch(pAllocationCreateInfo->usage)
    13728  {
    13730  break;
    13732  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13733  {
    13734  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13735  }
    13736  break;
    13738  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13739  break;
    13741  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13742  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13743  {
    13744  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13745  }
    13746  break;
    13748  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13749  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13750  break;
    13751  default:
    13752  break;
    13753  }
    13754 
    13755  *pMemoryTypeIndex = UINT32_MAX;
    13756  uint32_t minCost = UINT32_MAX;
    13757  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13758  memTypeIndex < allocator->GetMemoryTypeCount();
    13759  ++memTypeIndex, memTypeBit <<= 1)
    13760  {
    13761  // This memory type is acceptable according to memoryTypeBits bitmask.
    13762  if((memTypeBit & memoryTypeBits) != 0)
    13763  {
    13764  const VkMemoryPropertyFlags currFlags =
    13765  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13766  // This memory type contains requiredFlags.
    13767  if((requiredFlags & ~currFlags) == 0)
    13768  {
    13769  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13770  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13771  // Remember memory type with lowest cost.
    13772  if(currCost < minCost)
    13773  {
    13774  *pMemoryTypeIndex = memTypeIndex;
    13775  if(currCost == 0)
    13776  {
    13777  return VK_SUCCESS;
    13778  }
    13779  minCost = currCost;
    13780  }
    13781  }
    13782  }
    13783  }
    13784  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13785 }
    13786 
    13788  VmaAllocator allocator,
    13789  const VkBufferCreateInfo* pBufferCreateInfo,
    13790  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13791  uint32_t* pMemoryTypeIndex)
    13792 {
    13793  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13794  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13795  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13796  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13797 
    13798  const VkDevice hDev = allocator->m_hDevice;
    13799  VkBuffer hBuffer = VK_NULL_HANDLE;
    13800  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13801  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13802  if(res == VK_SUCCESS)
    13803  {
    13804  VkMemoryRequirements memReq = {};
    13805  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13806  hDev, hBuffer, &memReq);
    13807 
    13808  res = vmaFindMemoryTypeIndex(
    13809  allocator,
    13810  memReq.memoryTypeBits,
    13811  pAllocationCreateInfo,
    13812  pMemoryTypeIndex);
    13813 
    13814  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13815  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13816  }
    13817  return res;
    13818 }
    13819 
    13821  VmaAllocator allocator,
    13822  const VkImageCreateInfo* pImageCreateInfo,
    13823  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13824  uint32_t* pMemoryTypeIndex)
    13825 {
    13826  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13827  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13828  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13829  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13830 
    13831  const VkDevice hDev = allocator->m_hDevice;
    13832  VkImage hImage = VK_NULL_HANDLE;
    13833  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13834  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13835  if(res == VK_SUCCESS)
    13836  {
    13837  VkMemoryRequirements memReq = {};
    13838  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13839  hDev, hImage, &memReq);
    13840 
    13841  res = vmaFindMemoryTypeIndex(
    13842  allocator,
    13843  memReq.memoryTypeBits,
    13844  pAllocationCreateInfo,
    13845  pMemoryTypeIndex);
    13846 
    13847  allocator->GetVulkanFunctions().vkDestroyImage(
    13848  hDev, hImage, allocator->GetAllocationCallbacks());
    13849  }
    13850  return res;
    13851 }
    13852 
// Creates a custom memory pool described by *pCreateInfo and, when recording
// is compiled in and active, logs the call to the recorder.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
    13875 
// Destroys a custom pool. Passing VK_NULL_HANDLE is a no-op.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before destruction so the recorder still sees a valid handle.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
    13900 
// Retrieves usage statistics of a single custom pool into *pPoolStats.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
    13912 
    13914  VmaAllocator allocator,
    13915  VmaPool pool,
    13916  size_t* pLostAllocationCount)
    13917 {
    13918  VMA_ASSERT(allocator && pool);
    13919 
    13920  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13921 
    13922 #if VMA_RECORDING_ENABLED
    13923  if(allocator->GetRecorder() != VMA_NULL)
    13924  {
    13925  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13926  }
    13927 #endif
    13928 
    13929  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13930 }
    13931 
// Runs corruption detection over the given pool and returns its result.
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}
    13942 
// Allocates memory described by *pVkMemoryRequirements with no associated
// buffer or image (so no dedicated-allocation hints are available). On
// success optionally fills *pAllocationInfo.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // pAllocationInfo is optional and only filled on success.
    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13984 
    13986  VmaAllocator allocator,
    13987  VkBuffer buffer,
    13988  const VmaAllocationCreateInfo* pCreateInfo,
    13989  VmaAllocation* pAllocation,
    13990  VmaAllocationInfo* pAllocationInfo)
    13991 {
    13992  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13993 
    13994  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13995 
    13996  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13997 
    13998  VkMemoryRequirements vkMemReq = {};
    13999  bool requiresDedicatedAllocation = false;
    14000  bool prefersDedicatedAllocation = false;
    14001  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14002  requiresDedicatedAllocation,
    14003  prefersDedicatedAllocation);
    14004 
    14005  VkResult result = allocator->AllocateMemory(
    14006  vkMemReq,
    14007  requiresDedicatedAllocation,
    14008  prefersDedicatedAllocation,
    14009  buffer, // dedicatedBuffer
    14010  VK_NULL_HANDLE, // dedicatedImage
    14011  *pCreateInfo,
    14012  VMA_SUBALLOCATION_TYPE_BUFFER,
    14013  pAllocation);
    14014 
    14015 #if VMA_RECORDING_ENABLED
    14016  if(allocator->GetRecorder() != VMA_NULL)
    14017  {
    14018  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14019  allocator->GetCurrentFrameIndex(),
    14020  vkMemReq,
    14021  requiresDedicatedAllocation,
    14022  prefersDedicatedAllocation,
    14023  *pCreateInfo,
    14024  *pAllocation);
    14025  }
    14026 #endif
    14027 
    14028  if(pAllocationInfo && result == VK_SUCCESS)
    14029  {
    14030  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14031  }
    14032 
    14033  return result;
    14034 }
    14035 
// Allocates memory suitable for the given existing image, passing the
// image's memory requirements and dedicated-allocation hints to the
// allocator. On success optionally fills *pAllocationInfo.
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation  = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // pAllocationInfo is optional and only filled on success.
    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    14085 
// Frees a previously made allocation. Passing VK_NULL_HANDLE is a no-op.
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before freeing so the recorder still sees a valid handle.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->FreeMemory(allocation);
}
    14112 
// Attempts to change the size of an existing allocation in place and
// returns the allocator's result.
VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaResizeAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordResizeAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation,
            newSize);
    }
#endif

    return allocator->ResizeAllocation(allocation, newSize);
}
    14136 
    14138  VmaAllocator allocator,
    14139  VmaAllocation allocation,
    14140  VmaAllocationInfo* pAllocationInfo)
    14141 {
    14142  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14143 
    14144  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14145 
    14146 #if VMA_RECORDING_ENABLED
    14147  if(allocator->GetRecorder() != VMA_NULL)
    14148  {
    14149  allocator->GetRecorder()->RecordGetAllocationInfo(
    14150  allocator->GetCurrentFrameIndex(),
    14151  allocation);
    14152  }
    14153 #endif
    14154 
    14155  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14156 }
    14157 
// "Touches" the allocation and returns whether it is still usable
// (i.e. has not become lost), as reported by the allocator.
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordTouchAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return allocator->TouchAllocation(allocation);
}
    14177 
    14179  VmaAllocator allocator,
    14180  VmaAllocation allocation,
    14181  void* pUserData)
    14182 {
    14183  VMA_ASSERT(allocator && allocation);
    14184 
    14185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14186 
    14187  allocation->SetUserData(allocator, pUserData);
    14188 
    14189 #if VMA_RECORDING_ENABLED
    14190  if(allocator->GetRecorder() != VMA_NULL)
    14191  {
    14192  allocator->GetRecorder()->RecordSetAllocationUserData(
    14193  allocator->GetCurrentFrameIndex(),
    14194  allocation,
    14195  pUserData);
    14196  }
    14197 #endif
    14198 }
    14199 
    14201  VmaAllocator allocator,
    14202  VmaAllocation* pAllocation)
    14203 {
    14204  VMA_ASSERT(allocator && pAllocation);
    14205 
    14206  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14207 
    14208  allocator->CreateLostAllocation(pAllocation);
    14209 
    14210 #if VMA_RECORDING_ENABLED
    14211  if(allocator->GetRecorder() != VMA_NULL)
    14212  {
    14213  allocator->GetRecorder()->RecordCreateLostAllocation(
    14214  allocator->GetCurrentFrameIndex(),
    14215  *pAllocation);
    14216  }
    14217 #endif
    14218 }
    14219 
// Maps the allocation's memory and returns the pointer via *ppData.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->Map(allocation, ppData);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
    14242 
// Unmaps memory previously mapped with vmaMapMemory.
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordUnmapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->Unmap(allocation);
}
    14262 
// Flushes the given byte range of the allocation (host writes -> device).
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFlushAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    14282 
// Invalidates the given byte range of the allocation (device writes -> host).
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordInvalidateAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    14302 
// Runs corruption detection over the memory types selected by memoryTypeBits.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaCheckCorruption");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CheckCorruption(memoryTypeBits);
}
    14313 
// Defragments the given set of allocations. pAllocationsChanged,
// pDefragmentationInfo and pDefragmentationStats are forwarded as-is.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    VMA_ASSERT(allocator && pAllocations);

    VMA_DEBUG_LOG("vmaDefragment");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
}
    14330 
// Binds the given buffer to the allocation's memory.
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, buffer);
}
    14344 
// Binds the given image to the allocation's memory.
VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, image);
}
    14358 
/*
vmaCreateBuffer: creates a VkBuffer together with its backing memory in one call.

Steps:
 1. vkCreateBuffer.
 2. Query memory requirements (including dedicated-allocation flags).
 3. Allocate memory through the allocator.
 4. Bind the buffer to the allocated memory.

On failure of any step, everything created by the previous steps is
destroyed/freed, *pBuffer and *pAllocation are left as VK_NULL_HANDLE,
and the failing VkResult is returned.

pAllocationInfo is optional; it is filled only when all steps succeed.
*/
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    // Zero-size buffers are rejected up front instead of being passed to Vulkan.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Reset outputs so every failure path leaves null handles in the caller's variables.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Note: the call is recorded regardless of whether the allocation succeeded.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Binding failed: free the allocation from step 3, then destroy the buffer from step 1.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: destroy the buffer created in step 1.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
    14465 
    14466 void vmaDestroyBuffer(
    14467  VmaAllocator allocator,
    14468  VkBuffer buffer,
    14469  VmaAllocation allocation)
    14470 {
    14471  VMA_ASSERT(allocator);
    14472 
    14473  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14474  {
    14475  return;
    14476  }
    14477 
    14478  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14479 
    14480  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14481 
    14482 #if VMA_RECORDING_ENABLED
    14483  if(allocator->GetRecorder() != VMA_NULL)
    14484  {
    14485  allocator->GetRecorder()->RecordDestroyBuffer(
    14486  allocator->GetCurrentFrameIndex(),
    14487  allocation);
    14488  }
    14489 #endif
    14490 
    14491  if(buffer != VK_NULL_HANDLE)
    14492  {
    14493  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14494  }
    14495 
    14496  if(allocation != VK_NULL_HANDLE)
    14497  {
    14498  allocator->FreeMemory(allocation);
    14499  }
    14500 }
    14501 
/*
vmaCreateImage: creates a VkImage together with its backing memory in one call.

Mirrors vmaCreateBuffer: (1) vkCreateImage, (2) allocate memory through the
allocator, (3) bind image and memory. On failure of any step, everything
created by previous steps is destroyed/freed, *pImage and *pAllocation are
left as VK_NULL_HANDLE, and the failing VkResult is returned.

pAllocationInfo is optional; it is filled only when all steps succeed.
*/
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    // Degenerate images (any zero extent, mip count, or layer count) are rejected up front.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Reset outputs so every failure path leaves null handles in the caller's variables.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Suballocation type depends on tiling, because optimal- and linear-tiled
        // resources have different granularity/aliasing rules inside a memory block.
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Note: the call is recorded regardless of whether the allocation succeeded.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = allocator->BindImageMemory(*pAllocation, *pImage);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Binding failed: free the allocation from step 2, then destroy the image from step 1.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: destroy the image created in step 1.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
    14597 
    14598 void vmaDestroyImage(
    14599  VmaAllocator allocator,
    14600  VkImage image,
    14601  VmaAllocation allocation)
    14602 {
    14603  VMA_ASSERT(allocator);
    14604 
    14605  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14606  {
    14607  return;
    14608  }
    14609 
    14610  VMA_DEBUG_LOG("vmaDestroyImage");
    14611 
    14612  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14613 
    14614 #if VMA_RECORDING_ENABLED
    14615  if(allocator->GetRecorder() != VMA_NULL)
    14616  {
    14617  allocator->GetRecorder()->RecordDestroyImage(
    14618  allocator->GetCurrentFrameIndex(),
    14619  allocation);
    14620  }
    14621 #endif
    14622 
    14623  if(image != VK_NULL_HANDLE)
    14624  {
    14625  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14626  }
    14627  if(allocation != VK_NULL_HANDLE)
    14628  {
    14629  allocator->FreeMemory(allocation);
    14630  }
    14631 }
    14632 
    14633 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1584
    -
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1885
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1479 /*
    1480 Define this macro to 0/1 to disable/enable support for recording functionality,
    1481 available through VmaAllocatorCreateInfo::pRecordSettings.
    1482 */
    1483 #ifndef VMA_RECORDING_ENABLED
    1484  #ifdef _WIN32
    1485  #define VMA_RECORDING_ENABLED 1
    1486  #else
    1487  #define VMA_RECORDING_ENABLED 0
    1488  #endif
    1489 #endif
    1490 
    1491 #ifndef NOMINMAX
    1492  #define NOMINMAX // For windows.h
    1493 #endif
    1494 
    1495 #include <vulkan/vulkan.h>
    1496 
    1497 #if VMA_RECORDING_ENABLED
    1498  #include <windows.h>
    1499 #endif
    1500 
    1501 #if !defined(VMA_DEDICATED_ALLOCATION)
    1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1503  #define VMA_DEDICATED_ALLOCATION 1
    1504  #else
    1505  #define VMA_DEDICATED_ALLOCATION 0
    1506  #endif
    1507 #endif
    1508 
    1518 VK_DEFINE_HANDLE(VmaAllocator)
    1519 
    1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1522  VmaAllocator allocator,
    1523  uint32_t memoryType,
    1524  VkDeviceMemory memory,
    1525  VkDeviceSize size);
    1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1528  VmaAllocator allocator,
    1529  uint32_t memoryType,
    1530  VkDeviceMemory memory,
    1531  VkDeviceSize size);
    1532 
    1546 
    1576 
    1579 typedef VkFlags VmaAllocatorCreateFlags;
    1580 
    1585 typedef struct VmaVulkanFunctions {
    1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1588  PFN_vkAllocateMemory vkAllocateMemory;
    1589  PFN_vkFreeMemory vkFreeMemory;
    1590  PFN_vkMapMemory vkMapMemory;
    1591  PFN_vkUnmapMemory vkUnmapMemory;
    1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1594  PFN_vkBindBufferMemory vkBindBufferMemory;
    1595  PFN_vkBindImageMemory vkBindImageMemory;
    1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1598  PFN_vkCreateBuffer vkCreateBuffer;
    1599  PFN_vkDestroyBuffer vkDestroyBuffer;
    1600  PFN_vkCreateImage vkCreateImage;
    1601  PFN_vkDestroyImage vkDestroyImage;
    1602 #if VMA_DEDICATED_ALLOCATION
    1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1605 #endif
    1607 
    1609 typedef enum VmaRecordFlagBits {
    1616 
    1619 typedef VkFlags VmaRecordFlags;
    1620 
    1622 typedef struct VmaRecordSettings
    1623 {
    1633  const char* pFilePath;
    1635 
    1638 {
    1642 
    1643  VkPhysicalDevice physicalDevice;
    1645 
    1646  VkDevice device;
    1648 
    1651 
    1652  const VkAllocationCallbacks* pAllocationCallbacks;
    1654 
    1693  const VkDeviceSize* pHeapSizeLimit;
    1714 
    1716 VkResult vmaCreateAllocator(
    1717  const VmaAllocatorCreateInfo* pCreateInfo,
    1718  VmaAllocator* pAllocator);
    1719 
    1721 void vmaDestroyAllocator(
    1722  VmaAllocator allocator);
    1723 
    1729  VmaAllocator allocator,
    1730  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1731 
    1737  VmaAllocator allocator,
    1738  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1739 
    1747  VmaAllocator allocator,
    1748  uint32_t memoryTypeIndex,
    1749  VkMemoryPropertyFlags* pFlags);
    1750 
    1760  VmaAllocator allocator,
    1761  uint32_t frameIndex);
    1762 
    1765 typedef struct VmaStatInfo
    1766 {
    1768  uint32_t blockCount;
    1774  VkDeviceSize usedBytes;
    1776  VkDeviceSize unusedBytes;
    1779 } VmaStatInfo;
    1780 
    1782 typedef struct VmaStats
    1783 {
    1784  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1785  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1787 } VmaStats;
    1788 
    1790 void vmaCalculateStats(
    1791  VmaAllocator allocator,
    1792  VmaStats* pStats);
    1793 
    1794 #define VMA_STATS_STRING_ENABLED 1
    1795 
    1796 #if VMA_STATS_STRING_ENABLED
    1797 
    1799 
    1801 void vmaBuildStatsString(
    1802  VmaAllocator allocator,
    1803  char** ppStatsString,
    1804  VkBool32 detailedMap);
    1805 
    1806 void vmaFreeStatsString(
    1807  VmaAllocator allocator,
    1808  char* pStatsString);
    1809 
    1810 #endif // #if VMA_STATS_STRING_ENABLED
    1811 
    1820 VK_DEFINE_HANDLE(VmaPool)
    1821 
    1822 typedef enum VmaMemoryUsage
    1823 {
    1872 } VmaMemoryUsage;
    1873 
    1888 
    1943 
    1956 
    1966 
    1973 
    1977 
    1979 {
    1992  VkMemoryPropertyFlags requiredFlags;
    1997  VkMemoryPropertyFlags preferredFlags;
    2005  uint32_t memoryTypeBits;
    2018  void* pUserData;
    2020 
    2037 VkResult vmaFindMemoryTypeIndex(
    2038  VmaAllocator allocator,
    2039  uint32_t memoryTypeBits,
    2040  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2041  uint32_t* pMemoryTypeIndex);
    2042 
    2056  VmaAllocator allocator,
    2057  const VkBufferCreateInfo* pBufferCreateInfo,
    2058  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2059  uint32_t* pMemoryTypeIndex);
    2060 
    2074  VmaAllocator allocator,
    2075  const VkImageCreateInfo* pImageCreateInfo,
    2076  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2077  uint32_t* pMemoryTypeIndex);
    2078 
    2099 
    2116 
    2127 
    2133 
    2136 typedef VkFlags VmaPoolCreateFlags;
    2137 
    2140 typedef struct VmaPoolCreateInfo {
    2155  VkDeviceSize blockSize;
    2184 
    2187 typedef struct VmaPoolStats {
    2190  VkDeviceSize size;
    2193  VkDeviceSize unusedSize;
    2206  VkDeviceSize unusedRangeSizeMax;
    2209  size_t blockCount;
    2210 } VmaPoolStats;
    2211 
    2218 VkResult vmaCreatePool(
    2219  VmaAllocator allocator,
    2220  const VmaPoolCreateInfo* pCreateInfo,
    2221  VmaPool* pPool);
    2222 
    2225 void vmaDestroyPool(
    2226  VmaAllocator allocator,
    2227  VmaPool pool);
    2228 
    2235 void vmaGetPoolStats(
    2236  VmaAllocator allocator,
    2237  VmaPool pool,
    2238  VmaPoolStats* pPoolStats);
    2239 
    2247  VmaAllocator allocator,
    2248  VmaPool pool,
    2249  size_t* pLostAllocationCount);
    2250 
    2265 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2266 
    2291 VK_DEFINE_HANDLE(VmaAllocation)
    2292 
    2293 
    2295 typedef struct VmaAllocationInfo {
    2300  uint32_t memoryType;
    2309  VkDeviceMemory deviceMemory;
    2314  VkDeviceSize offset;
    2319  VkDeviceSize size;
    2333  void* pUserData;
    2335 
    2346 VkResult vmaAllocateMemory(
    2347  VmaAllocator allocator,
    2348  const VkMemoryRequirements* pVkMemoryRequirements,
    2349  const VmaAllocationCreateInfo* pCreateInfo,
    2350  VmaAllocation* pAllocation,
    2351  VmaAllocationInfo* pAllocationInfo);
    2352 
    2360  VmaAllocator allocator,
    2361  VkBuffer buffer,
    2362  const VmaAllocationCreateInfo* pCreateInfo,
    2363  VmaAllocation* pAllocation,
    2364  VmaAllocationInfo* pAllocationInfo);
    2365 
    2367 VkResult vmaAllocateMemoryForImage(
    2368  VmaAllocator allocator,
    2369  VkImage image,
    2370  const VmaAllocationCreateInfo* pCreateInfo,
    2371  VmaAllocation* pAllocation,
    2372  VmaAllocationInfo* pAllocationInfo);
    2373 
    2375 void vmaFreeMemory(
    2376  VmaAllocator allocator,
    2377  VmaAllocation allocation);
    2378 
    2399 VkResult vmaResizeAllocation(
    2400  VmaAllocator allocator,
    2401  VmaAllocation allocation,
    2402  VkDeviceSize newSize);
    2403 
    2421  VmaAllocator allocator,
    2422  VmaAllocation allocation,
    2423  VmaAllocationInfo* pAllocationInfo);
    2424 
    2439 VkBool32 vmaTouchAllocation(
    2440  VmaAllocator allocator,
    2441  VmaAllocation allocation);
    2442 
    2457  VmaAllocator allocator,
    2458  VmaAllocation allocation,
    2459  void* pUserData);
    2460 
    2472  VmaAllocator allocator,
    2473  VmaAllocation* pAllocation);
    2474 
    2509 VkResult vmaMapMemory(
    2510  VmaAllocator allocator,
    2511  VmaAllocation allocation,
    2512  void** ppData);
    2513 
    2518 void vmaUnmapMemory(
    2519  VmaAllocator allocator,
    2520  VmaAllocation allocation);
    2521 
    2534 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2535 
    2548 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2549 
    2566 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2567 
    2569 typedef struct VmaDefragmentationInfo {
    2574  VkDeviceSize maxBytesToMove;
    2581 
    2583 typedef struct VmaDefragmentationStats {
    2585  VkDeviceSize bytesMoved;
    2587  VkDeviceSize bytesFreed;
    2593 
    2632 VkResult vmaDefragment(
    2633  VmaAllocator allocator,
    2634  VmaAllocation* pAllocations,
    2635  size_t allocationCount,
    2636  VkBool32* pAllocationsChanged,
    2637  const VmaDefragmentationInfo *pDefragmentationInfo,
    2638  VmaDefragmentationStats* pDefragmentationStats);
    2639 
    2652 VkResult vmaBindBufferMemory(
    2653  VmaAllocator allocator,
    2654  VmaAllocation allocation,
    2655  VkBuffer buffer);
    2656 
    2669 VkResult vmaBindImageMemory(
    2670  VmaAllocator allocator,
    2671  VmaAllocation allocation,
    2672  VkImage image);
    2673 
    2700 VkResult vmaCreateBuffer(
    2701  VmaAllocator allocator,
    2702  const VkBufferCreateInfo* pBufferCreateInfo,
    2703  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2704  VkBuffer* pBuffer,
    2705  VmaAllocation* pAllocation,
    2706  VmaAllocationInfo* pAllocationInfo);
    2707 
    2719 void vmaDestroyBuffer(
    2720  VmaAllocator allocator,
    2721  VkBuffer buffer,
    2722  VmaAllocation allocation);
    2723 
    2725 VkResult vmaCreateImage(
    2726  VmaAllocator allocator,
    2727  const VkImageCreateInfo* pImageCreateInfo,
    2728  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2729  VkImage* pImage,
    2730  VmaAllocation* pAllocation,
    2731  VmaAllocationInfo* pAllocationInfo);
    2732 
    2744 void vmaDestroyImage(
    2745  VmaAllocator allocator,
    2746  VkImage image,
    2747  VmaAllocation allocation);
    2748 
    2749 #ifdef __cplusplus
    2750 }
    2751 #endif
    2752 
    2753 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2754 
    2755 // For Visual Studio IntelliSense.
    2756 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2757 #define VMA_IMPLEMENTATION
    2758 #endif
    2759 
    2760 #ifdef VMA_IMPLEMENTATION
    2761 #undef VMA_IMPLEMENTATION
    2762 
    2763 #include <cstdint>
    2764 #include <cstdlib>
    2765 #include <cstring>
    2766 
    2767 /*******************************************************************************
    2768 CONFIGURATION SECTION
    2769 
    2770 Define some of these macros before each #include of this header or change them
    2771 here if you need other then default behavior depending on your environment.
    2772 */
    2773 
    2774 /*
    2775 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2776 internally, like:
    2777 
    2778  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2779 
    2780 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2781 VmaAllocatorCreateInfo::pVulkanFunctions.
    2782 */
    2783 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2784 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2785 #endif
    2786 
    2787 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2788 //#define VMA_USE_STL_CONTAINERS 1
    2789 
    2790 /* Set this macro to 1 to make the library including and using STL containers:
    2791 std::pair, std::vector, std::list, std::unordered_map.
    2792 
    2793 Set it to 0 or undefined to make the library using its own implementation of
    2794 the containers.
    2795 */
    2796 #if VMA_USE_STL_CONTAINERS
    2797  #define VMA_USE_STL_VECTOR 1
    2798  #define VMA_USE_STL_UNORDERED_MAP 1
    2799  #define VMA_USE_STL_LIST 1
    2800 #endif
    2801 
    2802 #if VMA_USE_STL_VECTOR
    2803  #include <vector>
    2804 #endif
    2805 
    2806 #if VMA_USE_STL_UNORDERED_MAP
    2807  #include <unordered_map>
    2808 #endif
    2809 
    2810 #if VMA_USE_STL_LIST
    2811  #include <list>
    2812 #endif
    2813 
    2814 /*
    2815 Following headers are used in this CONFIGURATION section only, so feel free to
    2816 remove them if not needed.
    2817 */
    2818 #include <cassert> // for assert
    2819 #include <algorithm> // for min, max
    2820 #include <mutex> // for std::mutex
    2821 #include <atomic> // for std::atomic
    2822 
    2823 #ifndef VMA_NULL
    2824  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2825  #define VMA_NULL nullptr
    2826 #endif
    2827 
    2828 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2829 #include <cstdlib>
    2830 void *aligned_alloc(size_t alignment, size_t size)
    2831 {
    2832  // alignment must be >= sizeof(void*)
    2833  if(alignment < sizeof(void*))
    2834  {
    2835  alignment = sizeof(void*);
    2836  }
    2837 
    2838  return memalign(alignment, size);
    2839 }
    2840 #elif defined(__APPLE__) || defined(__ANDROID__)
    2841 #include <cstdlib>
    2842 void *aligned_alloc(size_t alignment, size_t size)
    2843 {
    2844  // alignment must be >= sizeof(void*)
    2845  if(alignment < sizeof(void*))
    2846  {
    2847  alignment = sizeof(void*);
    2848  }
    2849 
    2850  void *pointer;
    2851  if(posix_memalign(&pointer, alignment, size) == 0)
    2852  return pointer;
    2853  return VMA_NULL;
    2854 }
    2855 #endif
    2856 
    2857 // If your compiler is not compatible with C++11 and definition of
    2858 // aligned_alloc() function is missing, uncommenting following line may help:
    2859 
    2860 //#include <malloc.h>
    2861 
    2862 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2863 #ifndef VMA_ASSERT
    2864  #ifdef _DEBUG
    2865  #define VMA_ASSERT(expr) assert(expr)
    2866  #else
    2867  #define VMA_ASSERT(expr)
    2868  #endif
    2869 #endif
    2870 
    2871 // Assert that will be called very often, like inside data structures e.g. operator[].
    2872 // Making it non-empty can make program slow.
    2873 #ifndef VMA_HEAVY_ASSERT
    2874  #ifdef _DEBUG
    2875  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2876  #else
    2877  #define VMA_HEAVY_ASSERT(expr)
    2878  #endif
    2879 #endif
    2880 
    2881 #ifndef VMA_ALIGN_OF
    2882  #define VMA_ALIGN_OF(type) (__alignof(type))
    2883 #endif
    2884 
    2885 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2886  #if defined(_WIN32)
    2887  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2888  #else
    2889  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2890  #endif
    2891 #endif
    2892 
    2893 #ifndef VMA_SYSTEM_FREE
    2894  #if defined(_WIN32)
    2895  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2896  #else
    2897  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2898  #endif
    2899 #endif
    2900 
    2901 #ifndef VMA_MIN
    2902  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2903 #endif
    2904 
    2905 #ifndef VMA_MAX
    2906  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2907 #endif
    2908 
    2909 #ifndef VMA_SWAP
    2910  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2911 #endif
    2912 
    2913 #ifndef VMA_SORT
    2914  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2915 #endif
    2916 
    2917 #ifndef VMA_DEBUG_LOG
    2918  #define VMA_DEBUG_LOG(format, ...)
    2919  /*
    2920  #define VMA_DEBUG_LOG(format, ...) do { \
    2921  printf(format, __VA_ARGS__); \
    2922  printf("\n"); \
    2923  } while(false)
    2924  */
    2925 #endif
    2926 
    2927 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2928 #if VMA_STATS_STRING_ENABLED
    // Writes the decimal representation of `num` into outStr
    // (at most strLen bytes, always NUL-terminated by snprintf).
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        const unsigned int value = static_cast<unsigned int>(num);
        snprintf(outStr, strLen, "%u", value);
    }
    // Writes the decimal representation of `num` into outStr
    // (at most strLen bytes, always NUL-terminated by snprintf).
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        const unsigned long long value = static_cast<unsigned long long>(num);
        snprintf(outStr, strLen, "%llu", value);
    }
    // Writes the implementation-defined "%p" textual form of `ptr` into outStr
    // (at most strLen bytes, always NUL-terminated by snprintf).
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
    2941 #endif
    2942 
    2943 #ifndef VMA_MUTEX
    // Minimal mutex wrapper over std::mutex. Users can substitute their own
    // implementation by defining VMA_MUTEX before including this header.
    class VmaMutex
    {
    public:
        VmaMutex() = default;
        ~VmaMutex() = default;
        void Lock()
        {
            m_Mutex.lock();
        }
        void Unlock()
        {
            m_Mutex.unlock();
        }
    private:
        std::mutex m_Mutex;
    };
    2954  #define VMA_MUTEX VmaMutex
    2955 #endif
    2956 
    2957 /*
    2958 If providing your own implementation, you need to implement a subset of std::atomic:
    2959 
    2960 - Constructor(uint32_t desired)
    2961 - uint32_t load() const
    2962 - void store(uint32_t desired)
    2963 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2964 */
    2965 #ifndef VMA_ATOMIC_UINT32
    2966  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2967 #endif
    2968 
    2969 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2970 
    2974  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2975 #endif
    2976 
    2977 #ifndef VMA_DEBUG_ALIGNMENT
    2978 
    2982  #define VMA_DEBUG_ALIGNMENT (1)
    2983 #endif
    2984 
    2985 #ifndef VMA_DEBUG_MARGIN
    2986 
    2990  #define VMA_DEBUG_MARGIN (0)
    2991 #endif
    2992 
    2993 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2994 
    2998  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2999 #endif
    3000 
    3001 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3002 
    3007  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3008 #endif
    3009 
    3010 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3011 
    3015  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3016 #endif
    3017 
    3018 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3019 
    3023  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3024 #endif
    3025 
    3026 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3027  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3029 #endif
    3030 
    3031 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3032  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3034 #endif
    3035 
    3036 #ifndef VMA_CLASS_NO_COPY
    3037  #define VMA_CLASS_NO_COPY(className) \
    3038  private: \
    3039  className(const className&) = delete; \
    3040  className& operator=(const className&) = delete;
    3041 #endif
    3042 
    3043 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3044 
    3045 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3046 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3047 
    3048 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3049 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3050 
    3051 /*******************************************************************************
    3052 END OF CONFIGURATION
    3053 */
    3054 
    3055 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3056  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3057 
    3058 // Returns number of bits set to 1 in (v).
    3059 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3060 {
    3061  uint32_t c = v - ((v >> 1) & 0x55555555);
    3062  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3063  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3064  c = ((c >> 8) + c) & 0x00FF00FF;
    3065  c = ((c >> 16) + c) & 0x0000FFFF;
    3066  return c;
    3067 }
    3068 
    3069 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3070 // Use types like uint32_t, uint64_t as T.
    3071 template <typename T>
    3072 static inline T VmaAlignUp(T val, T align)
    3073 {
    3074  return (val + align - 1) / align * align;
    3075 }
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// (The original comment said "VmaAlignUp(11, 8) = 8", which describes the wrong function.)
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
    3083 
    3084 // Division with mathematical rounding to nearest number.
    3085 template <typename T>
    3086 static inline T VmaRoundDiv(T x, T y)
    3087 {
    3088  return (x + (y / (T)2)) / y;
    3089 }
    3090 
    3091 /*
    3092 Returns true if given number is a power of two.
    3093 T must be unsigned integer number or signed integer but always nonnegative.
    3094 For 0 returns true.
    3095 */
    3096 template <typename T>
    3097 inline bool VmaIsPow2(T x)
    3098 {
    3099  return (x & (x-1)) == 0;
    3100 }
    3101 
    3102 // Returns smallest power of 2 greater or equal to v.
    3103 static inline uint32_t VmaNextPow2(uint32_t v)
    3104 {
    3105  v--;
    3106  v |= v >> 1;
    3107  v |= v >> 2;
    3108  v |= v >> 4;
    3109  v |= v >> 8;
    3110  v |= v >> 16;
    3111  v++;
    3112  return v;
    3113 }
    3114 static inline uint64_t VmaNextPow2(uint64_t v)
    3115 {
    3116  v--;
    3117  v |= v >> 1;
    3118  v |= v >> 2;
    3119  v |= v >> 4;
    3120  v |= v >> 8;
    3121  v |= v >> 16;
    3122  v |= v >> 32;
    3123  v++;
    3124  return v;
    3125 }
    3126 
    3127 // Returns largest power of 2 less or equal to v.
    3128 static inline uint32_t VmaPrevPow2(uint32_t v)
    3129 {
    3130  v |= v >> 1;
    3131  v |= v >> 2;
    3132  v |= v >> 4;
    3133  v |= v >> 8;
    3134  v |= v >> 16;
    3135  v = v ^ (v >> 1);
    3136  return v;
    3137 }
    3138 static inline uint64_t VmaPrevPow2(uint64_t v)
    3139 {
    3140  v |= v >> 1;
    3141  v |= v >> 2;
    3142  v |= v >> 4;
    3143  v |= v >> 8;
    3144  v |= v >> 16;
    3145  v |= v >> 32;
    3146  v = v ^ (v >> 1);
    3147  return v;
    3148 }
    3149 
    3150 static inline bool VmaStrIsEmpty(const char* pStr)
    3151 {
    3152  return pStr == VMA_NULL || *pStr == '\0';
    3153 }
    3154 
    3155 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3156 {
    3157  switch(algorithm)
    3158  {
    3160  return "Linear";
    3162  return "Buddy";
    3163  case 0:
    3164  return "Default";
    3165  default:
    3166  VMA_ASSERT(0);
    3167  return "";
    3168  }
    3169 }
    3170 
    3171 #ifndef VMA_SORT
    3172 
    3173 template<typename Iterator, typename Compare>
    3174 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3175 {
    3176  Iterator centerValue = end; --centerValue;
    3177  Iterator insertIndex = beg;
    3178  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3179  {
    3180  if(cmp(*memTypeIndex, *centerValue))
    3181  {
    3182  if(insertIndex != memTypeIndex)
    3183  {
    3184  VMA_SWAP(*memTypeIndex, *insertIndex);
    3185  }
    3186  ++insertIndex;
    3187  }
    3188  }
    3189  if(insertIndex != centerValue)
    3190  {
    3191  VMA_SWAP(*insertIndex, *centerValue);
    3192  }
    3193  return insertIndex;
    3194 }
    3195 
    3196 template<typename Iterator, typename Compare>
    3197 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3198 {
    3199  if(beg < end)
    3200  {
    3201  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3202  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3203  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3204  }
    3205 }
    3206 
    3207 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3208 
    3209 #endif // #ifndef VMA_SORT
    3210 
    3211 /*
    3212 Returns true if two memory blocks occupy overlapping pages.
    3213 ResourceA must be in less memory offset than ResourceB.
    3214 
    3215 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3216 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3217 */
    3218 static inline bool VmaBlocksOnSamePage(
    3219  VkDeviceSize resourceAOffset,
    3220  VkDeviceSize resourceASize,
    3221  VkDeviceSize resourceBOffset,
    3222  VkDeviceSize pageSize)
    3223 {
    3224  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3225  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3226  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3227  VkDeviceSize resourceBStart = resourceBOffset;
    3228  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3229  return resourceAEndPage == resourceBStartPage;
    3230 }
    3231 
// Kind of content stored in a suballocation; used to decide whether two neighbors
// must respect bufferImageGranularity (see VmaIsBufferImageGranularityConflict).
enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,           // Unused region.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,        // Content unknown - treated conservatively.
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,  // Image with unknown tiling.
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3242 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Normalize so suballocType1 <= suballocType2; the relation is symmetric.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // Free space never conflicts with anything.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Unknown content: assume the worst.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        // Buffer conflicts with any image that is (or may be) optimally tiled.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        // Unknown-tiling image may be optimal, so it conflicts with all image types.
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Optimal vs optimal is fine: same tiling class.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
    3283 
    3284 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3285 {
    3286  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3287  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3288  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3289  {
    3290  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3291  }
    3292 }
    3293 
    3294 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3295 {
    3296  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3297  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3298  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3299  {
    3300  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3301  {
    3302  return false;
    3303  }
    3304  }
    3305  return true;
    3306 }
    3307 
    3308 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3309 struct VmaMutexLock
    3310 {
    3311  VMA_CLASS_NO_COPY(VmaMutexLock)
    3312 public:
    3313  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3314  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3315  {
    3316  if(m_pMutex)
    3317  {
    3318  m_pMutex->Lock();
    3319  }
    3320  }
    3321 
    3322  ~VmaMutexLock()
    3323  {
    3324  if(m_pMutex)
    3325  {
    3326  m_pMutex->Unlock();
    3327  }
    3328  }
    3329 
    3330 private:
    3331  VMA_MUTEX* m_pMutex;
    3332 };
    3333 
// Optional single global lock around every entry point, for debugging threading issues.
#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3343 
    3344 /*
    3345 Performs binary search and returns iterator to first element that is greater or
    3346 equal to (key), according to comparison (cmp).
    3347 
    3348 Cmp should return true if first argument is less than second argument.
    3349 
    3350 Returned value is the found element, if present in the collection or place where
    3351 new element with value (key) should be inserted.
    3352 */
    3353 template <typename CmpLess, typename IterT, typename KeyT>
    3354 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3355 {
    3356  size_t down = 0, up = (end - beg);
    3357  while(down < up)
    3358  {
    3359  const size_t mid = (down + up) / 2;
    3360  if(cmp(*(beg+mid), key))
    3361  {
    3362  down = mid + 1;
    3363  }
    3364  else
    3365  {
    3366  up = mid;
    3367  }
    3368  }
    3369  return beg + down;
    3370 }
    3371 
    3373 // Memory allocation
    3374 
    3375 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3376 {
    3377  if((pAllocationCallbacks != VMA_NULL) &&
    3378  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3379  {
    3380  return (*pAllocationCallbacks->pfnAllocation)(
    3381  pAllocationCallbacks->pUserData,
    3382  size,
    3383  alignment,
    3384  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3385  }
    3386  else
    3387  {
    3388  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3389  }
    3390 }
    3391 
    3392 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3393 {
    3394  if((pAllocationCallbacks != VMA_NULL) &&
    3395  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3396  {
    3397  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3398  }
    3399  else
    3400  {
    3401  VMA_SYSTEM_FREE(ptr);
    3402  }
    3403 }
    3404 
// Allocates raw storage for a single T (no constructor is run; pair with vma_new).
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3410 
// Allocates raw storage for (count) objects of type T (no constructors are run).
// NOTE(review): sizeof(T) * count is not checked for overflow - callers pass trusted sizes.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3416 
// Placement-new helpers: allocate through the callbacks, then value-construct in place.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3420 
// Destroys *ptr and releases its storage. ptr must not be null
// (the destructor call is unconditional), matching vma_new.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3427 
    3428 template<typename T>
    3429 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3430 {
    3431  if(ptr != VMA_NULL)
    3432  {
    3433  for(size_t i = count; i--; )
    3434  {
    3435  ptr[i].~T();
    3436  }
    3437  VmaFree(pAllocationCallbacks, ptr);
    3438  }
    3439 }
    3440 
// STL-compatible allocator.
// Minimal allocator adapter that routes all allocations through
// VkAllocationCallbacks, for use with VmaVector / VmaList.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by allocator-aware containers.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3468 
    3469 #if VMA_USE_STL_VECTOR
    3470 
    3471 #define VmaVector std::vector
    3472 
    3473 template<typename T, typename allocatorT>
    3474 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3475 {
    3476  vec.insert(vec.begin() + index, item);
    3477 }
    3478 
    3479 template<typename T, typename allocatorT>
    3480 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3481 {
    3482  vec.erase(vec.begin() + index);
    3483 }
    3484 
    3485 #else // #if VMA_USE_STL_VECTOR
    3486 
    3487 /* Class with interface compatible with subset of std::vector.
    3488 T must be POD because constructors and destructors are not called and memcpy is
    3489 used for these objects. */
    3490 template<typename T, typename AllocatorT>
    3491 class VmaVector
    3492 {
    3493 public:
    3494  typedef T value_type;
    3495 
    3496  VmaVector(const AllocatorT& allocator) :
    3497  m_Allocator(allocator),
    3498  m_pArray(VMA_NULL),
    3499  m_Count(0),
    3500  m_Capacity(0)
    3501  {
    3502  }
    3503 
    3504  VmaVector(size_t count, const AllocatorT& allocator) :
    3505  m_Allocator(allocator),
    3506  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3507  m_Count(count),
    3508  m_Capacity(count)
    3509  {
    3510  }
    3511 
    3512  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3513  m_Allocator(src.m_Allocator),
    3514  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3515  m_Count(src.m_Count),
    3516  m_Capacity(src.m_Count)
    3517  {
    3518  if(m_Count != 0)
    3519  {
    3520  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3521  }
    3522  }
    3523 
    3524  ~VmaVector()
    3525  {
    3526  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3527  }
    3528 
    3529  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3530  {
    3531  if(&rhs != this)
    3532  {
    3533  resize(rhs.m_Count);
    3534  if(m_Count != 0)
    3535  {
    3536  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3537  }
    3538  }
    3539  return *this;
    3540  }
    3541 
    3542  bool empty() const { return m_Count == 0; }
    3543  size_t size() const { return m_Count; }
    3544  T* data() { return m_pArray; }
    3545  const T* data() const { return m_pArray; }
    3546 
    3547  T& operator[](size_t index)
    3548  {
    3549  VMA_HEAVY_ASSERT(index < m_Count);
    3550  return m_pArray[index];
    3551  }
    3552  const T& operator[](size_t index) const
    3553  {
    3554  VMA_HEAVY_ASSERT(index < m_Count);
    3555  return m_pArray[index];
    3556  }
    3557 
    3558  T& front()
    3559  {
    3560  VMA_HEAVY_ASSERT(m_Count > 0);
    3561  return m_pArray[0];
    3562  }
    3563  const T& front() const
    3564  {
    3565  VMA_HEAVY_ASSERT(m_Count > 0);
    3566  return m_pArray[0];
    3567  }
    3568  T& back()
    3569  {
    3570  VMA_HEAVY_ASSERT(m_Count > 0);
    3571  return m_pArray[m_Count - 1];
    3572  }
    3573  const T& back() const
    3574  {
    3575  VMA_HEAVY_ASSERT(m_Count > 0);
    3576  return m_pArray[m_Count - 1];
    3577  }
    3578 
    3579  void reserve(size_t newCapacity, bool freeMemory = false)
    3580  {
    3581  newCapacity = VMA_MAX(newCapacity, m_Count);
    3582 
    3583  if((newCapacity < m_Capacity) && !freeMemory)
    3584  {
    3585  newCapacity = m_Capacity;
    3586  }
    3587 
    3588  if(newCapacity != m_Capacity)
    3589  {
    3590  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3591  if(m_Count != 0)
    3592  {
    3593  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3594  }
    3595  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3596  m_Capacity = newCapacity;
    3597  m_pArray = newArray;
    3598  }
    3599  }
    3600 
    3601  void resize(size_t newCount, bool freeMemory = false)
    3602  {
    3603  size_t newCapacity = m_Capacity;
    3604  if(newCount > m_Capacity)
    3605  {
    3606  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3607  }
    3608  else if(freeMemory)
    3609  {
    3610  newCapacity = newCount;
    3611  }
    3612 
    3613  if(newCapacity != m_Capacity)
    3614  {
    3615  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3616  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3617  if(elementsToCopy != 0)
    3618  {
    3619  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3620  }
    3621  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3622  m_Capacity = newCapacity;
    3623  m_pArray = newArray;
    3624  }
    3625 
    3626  m_Count = newCount;
    3627  }
    3628 
    3629  void clear(bool freeMemory = false)
    3630  {
    3631  resize(0, freeMemory);
    3632  }
    3633 
    3634  void insert(size_t index, const T& src)
    3635  {
    3636  VMA_HEAVY_ASSERT(index <= m_Count);
    3637  const size_t oldCount = size();
    3638  resize(oldCount + 1);
    3639  if(index < oldCount)
    3640  {
    3641  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3642  }
    3643  m_pArray[index] = src;
    3644  }
    3645 
    3646  void remove(size_t index)
    3647  {
    3648  VMA_HEAVY_ASSERT(index < m_Count);
    3649  const size_t oldCount = size();
    3650  if(index < oldCount - 1)
    3651  {
    3652  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3653  }
    3654  resize(oldCount - 1);
    3655  }
    3656 
    3657  void push_back(const T& src)
    3658  {
    3659  const size_t newIndex = size();
    3660  resize(newIndex + 1);
    3661  m_pArray[newIndex] = src;
    3662  }
    3663 
    3664  void pop_back()
    3665  {
    3666  VMA_HEAVY_ASSERT(m_Count > 0);
    3667  resize(size() - 1);
    3668  }
    3669 
    3670  void push_front(const T& src)
    3671  {
    3672  insert(0, src);
    3673  }
    3674 
    3675  void pop_front()
    3676  {
    3677  VMA_HEAVY_ASSERT(m_Count > 0);
    3678  remove(0);
    3679  }
    3680 
    3681  typedef T* iterator;
    3682 
    3683  iterator begin() { return m_pArray; }
    3684  iterator end() { return m_pArray + m_Count; }
    3685 
    3686 private:
    3687  AllocatorT m_Allocator;
    3688  T* m_pArray;
    3689  size_t m_Count;
    3690  size_t m_Capacity;
    3691 };
    3692 
// Inserts item at (index) of a VmaVector (same interface as the std::vector adapter above).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3698 
// Removes the element at (index) from a VmaVector.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3704 
    3705 #endif // #if VMA_USE_STL_VECTOR
    3706 
    3707 template<typename CmpLess, typename VectorT>
    3708 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3709 {
    3710  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3711  vector.data(),
    3712  vector.data() + vector.size(),
    3713  value,
    3714  CmpLess()) - vector.data();
    3715  VmaVectorInsert(vector, indexToInsert, value);
    3716  return indexToInsert;
    3717 }
    3718 
    3719 template<typename CmpLess, typename VectorT>
    3720 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3721 {
    3722  CmpLess comparator;
    3723  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3724  vector.begin(),
    3725  vector.end(),
    3726  value,
    3727  comparator);
    3728  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3729  {
    3730  size_t indexToRemove = it - vector.begin();
    3731  VmaVectorRemove(vector, indexToRemove);
    3732  return true;
    3733  }
    3734  return false;
    3735 }
    3736 
    3737 template<typename CmpLess, typename IterT, typename KeyT>
    3738 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3739 {
    3740  CmpLess comparator;
    3741  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3742  beg, end, value, comparator);
    3743  if(it == end ||
    3744  (!comparator(*it, value) && !comparator(value, *it)))
    3745  {
    3746  return it;
    3747  }
    3748  return end;
    3749 }
    3750 
    3752 // class VmaPoolAllocator
    3753 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks. Outstanding pointers from Alloc() become invalid.
    void Clear();
    // Returns an uninitialized slot for a T (no constructor is run here).
    T* Alloc();
    // Returns a slot previously obtained from Alloc() to its block's free list.
    void Free(T* ptr);

private:
    // Each slot is either a live T or a link in the block's free list.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex; // UINT32_MAX when the block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3789 
// Creates an empty pool allocator; blocks of (itemsPerBlock) slots are allocated lazily.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3798 
// Releases all blocks owned by this allocator.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3804 
    3805 template<typename T>
    3806 void VmaPoolAllocator<T>::Clear()
    3807 {
    3808  for(size_t i = m_ItemBlocks.size(); i--; )
    3809  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3810  m_ItemBlocks.clear();
    3811 }
    3812 
    3813 template<typename T>
    3814 T* VmaPoolAllocator<T>::Alloc()
    3815 {
    3816  for(size_t i = m_ItemBlocks.size(); i--; )
    3817  {
    3818  ItemBlock& block = m_ItemBlocks[i];
    3819  // This block has some free items: Use first one.
    3820  if(block.FirstFreeIndex != UINT32_MAX)
    3821  {
    3822  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3823  block.FirstFreeIndex = pItem->NextFreeIndex;
    3824  return &pItem->Value;
    3825  }
    3826  }
    3827 
    3828  // No block has free item: Create new one and use it.
    3829  ItemBlock& newBlock = CreateNewBlock();
    3830  Item* const pItem = &newBlock.pItems[0];
    3831  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3832  return &pItem->Value;
    3833 }
    3834 
    3835 template<typename T>
    3836 void VmaPoolAllocator<T>::Free(T* ptr)
    3837 {
    3838  // Search all memory blocks to find ptr.
    3839  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3840  {
    3841  ItemBlock& block = m_ItemBlocks[i];
    3842 
    3843  // Casting to union.
    3844  Item* pItemPtr;
    3845  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3846 
    3847  // Check if pItemPtr is in address range of this block.
    3848  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3849  {
    3850  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3851  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3852  block.FirstFreeIndex = index;
    3853  return;
    3854  }
    3855  }
    3856  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3857 }
    3858 
    3859 template<typename T>
    3860 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3861 {
    3862  ItemBlock newBlock = {
    3863  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3864 
    3865  m_ItemBlocks.push_back(newBlock);
    3866 
    3867  // Setup singly-linked list of all free items in this block.
    3868  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3869  newBlock.pItems[i].NextFreeIndex = i + 1;
    3870  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3871  return m_ItemBlocks.back();
    3872 }
    3873 
    3875 // class VmaRawList, VmaList
    3876 
    3877 #if VMA_USE_STL_LIST
    3878 
    3879 #define VmaList std::list
    3880 
    3881 #else // #if VMA_USE_STL_LIST
    3882 
// Node of the doubly linked VmaRawList below.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    3890 
// Doubly linked list.
// Exposes raw node pointers (ItemType*); nodes come from an internal VmaPoolAllocator.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push/insert return the new node; its Value is uninitialized unless
    // the overload taking a value is used.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    3935 
// Creates an empty list; nodes are pool-allocated 128 at a time.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3945 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's destructor releases all node storage at once.
}
    3952 
    3953 template<typename T>
    3954 void VmaRawList<T>::Clear()
    3955 {
    3956  if(IsEmpty() == false)
    3957  {
    3958  ItemType* pItem = m_pBack;
    3959  while(pItem != VMA_NULL)
    3960  {
    3961  ItemType* const pPrevItem = pItem->pPrev;
    3962  m_ItemAllocator.Free(pItem);
    3963  pItem = pPrevItem;
    3964  }
    3965  m_pFront = VMA_NULL;
    3966  m_pBack = VMA_NULL;
    3967  m_Count = 0;
    3968  }
    3969 }
    3970 
    3971 template<typename T>
    3972 VmaListItem<T>* VmaRawList<T>::PushBack()
    3973 {
    3974  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3975  pNewItem->pNext = VMA_NULL;
    3976  if(IsEmpty())
    3977  {
    3978  pNewItem->pPrev = VMA_NULL;
    3979  m_pFront = pNewItem;
    3980  m_pBack = pNewItem;
    3981  m_Count = 1;
    3982  }
    3983  else
    3984  {
    3985  pNewItem->pPrev = m_pBack;
    3986  m_pBack->pNext = pNewItem;
    3987  m_pBack = pNewItem;
    3988  ++m_Count;
    3989  }
    3990  return pNewItem;
    3991 }
    3992 
    3993 template<typename T>
    3994 VmaListItem<T>* VmaRawList<T>::PushFront()
    3995 {
    3996  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3997  pNewItem->pPrev = VMA_NULL;
    3998  if(IsEmpty())
    3999  {
    4000  pNewItem->pNext = VMA_NULL;
    4001  m_pFront = pNewItem;
    4002  m_pBack = pNewItem;
    4003  m_Count = 1;
    4004  }
    4005  else
    4006  {
    4007  pNewItem->pNext = m_pFront;
    4008  m_pFront->pPrev = pNewItem;
    4009  m_pFront = pNewItem;
    4010  ++m_Count;
    4011  }
    4012  return pNewItem;
    4013 }
    4014 
// Appends a new node holding a copy of (value).
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    4022 
// Prepends a new node holding a copy of (value).
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    4030 
// Removes the last element. List must not be empty.
template<typename T>
void VmaRawList<T>::PopBack()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pBackItem = m_pBack;
    ItemType* const pPrevItem = pBackItem->pPrev;
    if(pPrevItem != VMA_NULL)
    {
        pPrevItem->pNext = VMA_NULL;
    }
    // NOTE(review): when the last node is removed, m_pFront is left pointing at
    // the freed node; relies on callers checking IsEmpty() before using Front().
    m_pBack = pPrevItem;
    m_ItemAllocator.Free(pBackItem);
    --m_Count;
}
    4045 
// Removes the first element. List must not be empty.
template<typename T>
void VmaRawList<T>::PopFront()
{
    VMA_HEAVY_ASSERT(m_Count > 0);
    ItemType* const pFrontItem = m_pFront;
    ItemType* const pNextItem = pFrontItem->pNext;
    if(pNextItem != VMA_NULL)
    {
        pNextItem->pPrev = VMA_NULL;
    }
    // NOTE(review): when the last node is removed, m_pBack is left pointing at
    // the freed node; relies on callers checking IsEmpty() before using Back().
    m_pFront = pNextItem;
    m_ItemAllocator.Free(pFrontItem);
    --m_Count;
}
    4060 
// Unlinks and frees the given node. pItem must belong to this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Splice the predecessor (or the list head) past pItem.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Splice the successor (or the list tail) past pItem.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4090 
    4091 template<typename T>
    4092 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4093 {
    4094  if(pItem != VMA_NULL)
    4095  {
    4096  ItemType* const prevItem = pItem->pPrev;
    4097  ItemType* const newItem = m_ItemAllocator.Alloc();
    4098  newItem->pPrev = prevItem;
    4099  newItem->pNext = pItem;
    4100  pItem->pPrev = newItem;
    4101  if(prevItem != VMA_NULL)
    4102  {
    4103  prevItem->pNext = newItem;
    4104  }
    4105  else
    4106  {
    4107  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4108  m_pFront = newItem;
    4109  }
    4110  ++m_Count;
    4111  return newItem;
    4112  }
    4113  else
    4114  return PushBack();
    4115 }
    4116 
    4117 template<typename T>
    4118 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4119 {
    4120  if(pItem != VMA_NULL)
    4121  {
    4122  ItemType* const nextItem = pItem->pNext;
    4123  ItemType* const newItem = m_ItemAllocator.Alloc();
    4124  newItem->pNext = nextItem;
    4125  newItem->pPrev = pItem;
    4126  pItem->pNext = newItem;
    4127  if(nextItem != VMA_NULL)
    4128  {
    4129  nextItem->pPrev = newItem;
    4130  }
    4131  else
    4132  {
    4133  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4134  m_pBack = newItem;
    4135  }
    4136  ++m_Count;
    4137  return newItem;
    4138  }
    4139  else
    4140  return PushFront();
    4141 }
    4142 
    4143 template<typename T>
    4144 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4145 {
    4146  ItemType* const newItem = InsertBefore(pItem);
    4147  newItem->Value = value;
    4148  return newItem;
    4149 }
    4150 
    4151 template<typename T>
    4152 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4153 {
    4154  ItemType* const newItem = InsertAfter(pItem);
    4155  newItem->Value = value;
    4156  return newItem;
    4157 }
    4158 
/*
Thin STL-like wrapper over VmaRawList<T>: provides iterator / const_iterator,
begin/end, push_back, insert, erase. AllocatorT must expose m_pCallbacks
(VkAllocationCallbacks*), which is forwarded to the underlying raw list.
Iterators are (list, item) pairs; end() is represented by a null item pointer.
*/
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        // Default-constructed iterator is singular: not attached to any list.
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Dereference. Must not be called on end() (null item).
        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        // Pre-increment: advance to the next item; incrementing the last item
        // yields end() (null item).
        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        // Pre-decrement: stepping back from end() lands on the last element,
        // which requires a non-empty list.
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        // Post-increment/decrement return the pre-step value.
        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparisons are only meaningful between iterators of the same list.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList; // Owning list; needed to step back from end().
        VmaListItem<T>* m_pItem; // Current node; null means end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    // Erase invalidates only iterators pointing at the removed node.
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before `it`; insert at end() appends.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4343 
    4344 #endif // #if VMA_USE_STL_LIST
    4345 
    4347 // class VmaMap
    4348 
    4349 // Unused in this version.
    4350 #if 0
    4351 
    4352 #if VMA_USE_STL_UNORDERED_MAP
    4353 
    4354 #define VmaPair std::pair
    4355 
    4356 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4357  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4358 
    4359 #else // #if VMA_USE_STL_UNORDERED_MAP
    4360 
// Minimal replacement for std::pair, used as the element type of VmaMap.
// (This whole section is compiled out via the enclosing #if 0.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4370 
/* Class compatible with a subset of the interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Internally a sorted vector of pairs (sorted by key), so lookups are
binary searches and insert/erase are O(n) shifts.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the vector; invalidated by insert/erase.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs kept sorted ascending by .first (see VmaPairFirstLess).
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4393 
    4394 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4395 
    4396 template<typename FirstT, typename SecondT>
    4397 struct VmaPairFirstLess
    4398 {
    4399  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4400  {
    4401  return lhs.first < rhs.first;
    4402  }
    4403  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4404  {
    4405  return lhs.first < rhsFirst;
    4406  }
    4407 };
    4408 
    4409 template<typename KeyT, typename ValueT>
    4410 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4411 {
    4412  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4413  m_Vector.data(),
    4414  m_Vector.data() + m_Vector.size(),
    4415  pair,
    4416  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4417  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4418 }
    4419 
    4420 template<typename KeyT, typename ValueT>
    4421 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4422 {
    4423  PairType* it = VmaBinaryFindFirstNotLess(
    4424  m_Vector.data(),
    4425  m_Vector.data() + m_Vector.size(),
    4426  key,
    4427  VmaPairFirstLess<KeyT, ValueT>());
    4428  if((it != m_Vector.end()) && (it->first == key))
    4429  {
    4430  return it;
    4431  }
    4432  else
    4433  {
    4434  return m_Vector.end();
    4435  }
    4436 }
    4437 
// Removes the element `it` points at, shifting subsequent elements left.
// `it` must be a valid iterator into this map (not end()).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4443 
    4444 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4445 
    4446 #endif // #if 0
    4447 
    4449 
// Defined later in this file; forward-declared for use in VmaAllocation_T below.
class VmaDeviceMemoryBlock;

// Selects the direction of a host cache maintenance operation
// (flush vs. invalidate) on mapped memory ranges.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4453 
/*
Internal representation of a single allocation (what a VmaAllocation handle
points to). Tagged union with two live states, selected by m_Type:
- ALLOCATION_TYPE_BLOCK: a suballocation within a VmaDeviceMemoryBlock
  (m_BlockAllocation active).
- ALLOCATION_TYPE_DEDICATED: owns its own private VkDeviceMemory
  (m_DedicatedAllocation active).
An object starts as ALLOCATION_TYPE_NONE and is initialized exactly once via
InitBlockAllocation / InitLost / InitDedicatedAllocation.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Bit set in m_MapCount when the allocation was created persistently mapped;
    // the low 7 bits count explicit vmaMapMemory() calls.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when pUserData is treated as an owned copy of a string.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Only the persistent-map bit may remain set; a nonzero user map count
        // means a vmaMapMemory() was never paired with vmaUnmapMemory().
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns a NONE allocation into a block suballocation.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes as an already-lost block allocation (no backing block).
    // Requires m_LastUseFrameIndex to have been set to VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Re-targets an existing block allocation to another block/offset
    // (used by defragmentation). Defined out of line.
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block allocations.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; `expected` is updated on failure
    // (standard compare_exchange_weak semantics — may fail spuriously).
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills `outInfo` with statistics describing this single dedicated
    // allocation (one block, one allocation, no unused ranges).
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap entry points, split by allocation type. Defined out of line.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type (see class comment).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4672 
    4673 /*
    4674 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4675 allocated memory block or free.
    4676 */
    4677 struct VmaSuballocation
    4678 {
    4679  VkDeviceSize offset;
    4680  VkDeviceSize size;
    4681  VmaAllocation hAllocation;
    4682  VmaSuballocationType type;
    4683 };
    4684 
// Comparator for offsets: orders suballocations ascending by offset,
// for use with sorted containers and binary searches.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Mirror of VmaSuballocationOffsetLess: orders suballocations descending by offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    4700 
// Doubly-linked list of suballocations covering a whole block, kept in offset order.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
// Used by VmaAllocationRequest::CalcCost() to compare candidate requests.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4705 
    4706 /*
    4707 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4708 
    4709 If canMakeOtherLost was false:
    4710 - item points to a FREE suballocation.
    4711 - itemsToMakeLostCount is 0.
    4712 
    4713 If canMakeOtherLost was true:
    4714 - item points to first of sequence of suballocations, which are either FREE,
    4715  or point to VmaAllocations that can become lost.
    4716 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4717  the requested allocation to succeed.
    4718 */
    4719 struct VmaAllocationRequest
    4720 {
    4721  VkDeviceSize offset;
    4722  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    4723  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    4724  VmaSuballocationList::iterator item;
    4725  size_t itemsToMakeLostCount;
    4726  void* customData;
    4727 
    4728  VkDeviceSize CalcCost() const
    4729  {
    4730  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    4731  }
    4732 };
    4733 
    4734 /*
    4735 Data structure used for bookkeeping of allocations and unused ranges of memory
    4736 in a single VkDeviceMemory block.
    4737 */
    4738 class VmaBlockMetadata
    4739 {
    4740 public:
    4741  VmaBlockMetadata(VmaAllocator hAllocator);
    4742  virtual ~VmaBlockMetadata() { }
    4743  virtual void Init(VkDeviceSize size) { m_Size = size; }
    4744 
    4745  // Validates all data structures inside this object. If not valid, returns false.
    4746  virtual bool Validate() const = 0;
    4747  VkDeviceSize GetSize() const { return m_Size; }
    4748  virtual size_t GetAllocationCount() const = 0;
    4749  virtual VkDeviceSize GetSumFreeSize() const = 0;
    4750  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    4751  // Returns true if this block is empty - contains only single free suballocation.
    4752  virtual bool IsEmpty() const = 0;
    4753 
    4754  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    4755  // Shouldn't modify blockCount.
    4756  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    4757 
    4758 #if VMA_STATS_STRING_ENABLED
    4759  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    4760 #endif
    4761 
    4762  // Tries to find a place for suballocation with given parameters inside this block.
    4763  // If succeeded, fills pAllocationRequest and returns true.
    4764  // If failed, returns false.
    4765  virtual bool CreateAllocationRequest(
    4766  uint32_t currentFrameIndex,
    4767  uint32_t frameInUseCount,
    4768  VkDeviceSize bufferImageGranularity,
    4769  VkDeviceSize allocSize,
    4770  VkDeviceSize allocAlignment,
    4771  bool upperAddress,
    4772  VmaSuballocationType allocType,
    4773  bool canMakeOtherLost,
    4774  uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
    4775  VmaAllocationRequest* pAllocationRequest) = 0;
    4776 
    4777  virtual bool MakeRequestedAllocationsLost(
    4778  uint32_t currentFrameIndex,
    4779  uint32_t frameInUseCount,
    4780  VmaAllocationRequest* pAllocationRequest) = 0;
    4781 
    4782  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    4783 
    4784  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    4785 
    4786  // Makes actual allocation based on request. Request must already be checked and valid.
    4787  virtual void Alloc(
    4788  const VmaAllocationRequest& request,
    4789  VmaSuballocationType type,
    4790  VkDeviceSize allocSize,
    4791  bool upperAddress,
    4792  VmaAllocation hAllocation) = 0;
    4793 
    4794  // Frees suballocation assigned to given memory region.
    4795  virtual void Free(const VmaAllocation allocation) = 0;
    4796  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    4797 
    4798  // Tries to resize (grow or shrink) space for given allocation, in place.
    4799  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
    4800 
    4801 protected:
    4802  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    4803 
    4804 #if VMA_STATS_STRING_ENABLED
    4805  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    4806  VkDeviceSize unusedBytes,
    4807  size_t allocationCount,
    4808  size_t unusedRangeCount) const;
    4809  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    4810  VkDeviceSize offset,
    4811  VmaAllocation hAllocation) const;
    4812  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    4813  VkDeviceSize offset,
    4814  VkDeviceSize size) const;
    4815  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    4816 #endif
    4817 
    4818 private:
    4819  VkDeviceSize m_Size;
    4820  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4821 };
    4822 
// Helper for Validate() implementations: asserts and returns false from the
// enclosing function when `cond` does not hold.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4827 
/*
Default block metadata strategy. Keeps all suballocations (used and free) in a
linked list (m_Suballocations), plus a size-sorted index of large-enough free
ranges (m_FreeSuballocationsBySize) for best-fit style searches.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Used allocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    uint32_t m_FreeCount; // Number of FREE suballocations in m_Suballocations.
    VkDeviceSize m_SumFreeSize; // Total bytes in all free suballocations.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4920 
    4921 /*
    4922 Allocations and their references in internal data structure look like this:
    4923 
    4924 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4925 
    4926  0 +-------+
    4927  | |
    4928  | |
    4929  | |
    4930  +-------+
    4931  | Alloc | 1st[m_1stNullItemsBeginCount]
    4932  +-------+
    4933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4934  +-------+
    4935  | ... |
    4936  +-------+
    4937  | Alloc | 1st[1st.size() - 1]
    4938  +-------+
    4939  | |
    4940  | |
    4941  | |
    4942 GetSize() +-------+
    4943 
    4944 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4945 
    4946  0 +-------+
    4947  | Alloc | 2nd[0]
    4948  +-------+
    4949  | Alloc | 2nd[1]
    4950  +-------+
    4951  | ... |
    4952  +-------+
    4953  | Alloc | 2nd[2nd.size() - 1]
    4954  +-------+
    4955  | |
    4956  | |
    4957  | |
    4958  +-------+
    4959  | Alloc | 1st[m_1stNullItemsBeginCount]
    4960  +-------+
    4961  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4962  +-------+
    4963  | ... |
    4964  +-------+
    4965  | Alloc | 1st[1st.size() - 1]
    4966  +-------+
    4967  | |
    4968 GetSize() +-------+
    4969 
    4970 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4971 
    4972  0 +-------+
    4973  | |
    4974  | |
    4975  | |
    4976  +-------+
    4977  | Alloc | 1st[m_1stNullItemsBeginCount]
    4978  +-------+
    4979  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4980  +-------+
    4981  | ... |
    4982  +-------+
    4983  | Alloc | 1st[1st.size() - 1]
    4984  +-------+
    4985  | |
    4986  | |
    4987  | |
    4988  +-------+
    4989  | Alloc | 2nd[2nd.size() - 1]
    4990  +-------+
    4991  | ... |
    4992  +-------+
    4993  | Alloc | 2nd[1]
    4994  +-------+
    4995  | Alloc | 2nd[0]
    4996 GetSize() +-------+
    4997 
    4998 */
/*
Block metadata implementing the linear allocation algorithm.
Depending on usage it behaves as a stack, a ring buffer, or a double
stack — see the layout diagram in the comment above and the
SECOND_VECTOR_MODE documentation below.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    // Must be called after construction, before any other use.
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    // Backing storage for the two logical vectors (1st and 2nd).
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5097 
    5098 /*
    5099 - GetSize() is the original size of allocated memory block.
    5100 - m_UsableSize is this size aligned down to a power of two.
    5101  All allocations and calculations happen relative to m_UsableSize.
    5102 - GetUnusableSize() is the difference between them.
     5103  It is reported as separate, unused range, not available for allocations.
    5104 
    5105 Node at level 0 has size = m_UsableSize.
    5106 Each next level contains nodes with size 2 times smaller than current level.
    5107 m_LevelCount is the maximum number of levels to use in the current object.
    5108 */
/*
Block metadata implementing the buddy allocation algorithm over a binary
tree of nodes. The usable size is the block size aligned down to a power
of two; the remainder (GetUnusableSize()) is never handed out.
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    // Must be called after construction, before any other use.
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Includes the unusable tail, so reported sizes sum to the full block size.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Counters recomputed while walking the tree in Validate(), to be
    // compared against the cached m_AllocationCount / m_FreeCount / m_SumFreeSize.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Node of the buddy tree. The active union member depends on `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        union
        {
            // TYPE_FREE: intrusive links for m_FreeList at this node's level.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // TYPE_SPLIT: node divided into two children.
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    // Number of levels in use. Level 0 node has size m_UsableSize.
    uint32_t m_LevelCount;

    Node* m_Root;
    // One free list per level, linked through Node::free.prev/next.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    // Maps a requested allocation size to a tree level (inverse of LevelToNodeSize).
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5245 
    5246 /*
    5247 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5248 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5249 
    5250 Thread-safety: This class must be externally synchronized.
    5251 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Suballocation bookkeeping for this block (algorithm chosen in Init()).
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory; `count` adjusts the internal map reference
    // count (m_MapCount). ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    // Decrements the map reference count by `count`.
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5314 
    5315 struct VmaPointerLess
    5316 {
    5317  bool operator()(const void* lhs, const void* rhs) const
    5318  {
    5319  return lhs < rhs;
    5320  }
    5321 };
    5322 
    5323 class VmaDefragmentator;
    5324 
    5325 /*
    5326 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5327 Vulkan memory type.
    5328 
    5329 Synchronized internally with a mutex.
    5330 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    VkResult CreateMinBlocks();

    // Accessors for the immutable configuration set in the constructor.
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Main allocation function for this memory type's blocks.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Returns m_pDefragmentator, creating it first if it doesn't exist yet.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    // Guards the mutable state below (internal synchronization of this object).
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5443 
// Internal representation of a custom memory pool (the VmaPool handle).
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    // Storage and bookkeeping of the pool's memory blocks.
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id can be assigned only once (asserts it was 0, i.e. unset, before).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5466 
// Moves allocations between the blocks of a single VmaBlockVector to
// defragment it. Progress totals accumulate in m_BytesMoved /
// m_AllocationsMoved and are readable via the public getters.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    // The block vector being defragmented.
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation via AddAllocation().
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        // Optional out-flag provided by the caller of AddAllocation(). Can be null.
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders AllocationInfo by allocation size, largest first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state built up during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations if it holds more allocations
        // than were registered for defragmentation in m_Allocations.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // Sorts m_Allocations largest-first.
        // NOTE: name keeps its historical misspelling of "Descending";
        // renaming would break out-of-line callers.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo entries by the address of their underlying block;
    // the heterogeneous overload allows lookup by raw block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moves, bounded by the given limits.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    // Heuristic: whether moving from the source position to the destination
    // position is considered an improvement.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation to be considered for moving.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    // Performs the defragmentation, bounded by the given limits.
    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5596 
    5597 #if VMA_RECORDING_ENABLED
    5598 
/*
Records the sequence of calls made to the allocator into a file, as
configured by VmaRecordSettings. Compiled only when VMA_RECORDING_ENABLED.
*/
class VmaRecorder
{
public:
    VmaRecorder();
    // Initializes recording according to `settings`. useMutex enables
    // guarding the output file against concurrent writes (see m_FileMutex).
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device and allocator configuration.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per recorded allocator entry point.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Parameters identifying a recorded call: calling thread and time.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Converts allocation pUserData into a printable string for the log.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        // Scratch buffer m_Str may point into — presumably the pointer
        // value formatted as hex (16 digits + null); confirm in definition.
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;
    // Timer calibration: frequency and counter value captured at Init.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5698 
    5699 #endif // #if VMA_RECORDING_ENABLED
    5700 
    5701 // Main allocator object.
    5702 struct VmaAllocator_T
    5703 {
    5704  VMA_CLASS_NO_COPY(VmaAllocator_T)
    5705 public:
    5706  bool m_UseMutex;
    5707  bool m_UseKhrDedicatedAllocation;
    5708  VkDevice m_hDevice;
    5709  bool m_AllocationCallbacksSpecified;
    5710  VkAllocationCallbacks m_AllocationCallbacks;
    5711  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    5712 
    5713  // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    5714  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    5715  VMA_MUTEX m_HeapSizeLimitMutex;
    5716 
    5717  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    5718  VkPhysicalDeviceMemoryProperties m_MemProps;
    5719 
    5720  // Default pools.
    5721  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    5722 
    5723  // Each vector is sorted by memory (handle value).
    5724  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    5725  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    5726  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    5727 
    5728  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    5729  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    5730  ~VmaAllocator_T();
    5731 
    5732  const VkAllocationCallbacks* GetAllocationCallbacks() const
    5733  {
    5734  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    5735  }
    5736  const VmaVulkanFunctions& GetVulkanFunctions() const
    5737  {
    5738  return m_VulkanFunctions;
    5739  }
    5740 
    5741  VkDeviceSize GetBufferImageGranularity() const
    5742  {
    5743  return VMA_MAX(
    5744  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    5745  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    5746  }
    5747 
    5748  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    5749  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    5750 
    5751  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    5752  {
    5753  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    5754  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    5755  }
    5756  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    5757  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    5758  {
    5759  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    5760  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    5761  }
    5762  // Minimum alignment for all allocations in specific memory type.
    5763  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    5764  {
    5765  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    5766  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    5767  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    5768  }
    5769 
    5770  bool IsIntegratedGpu() const
    5771  {
    5772  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    5773  }
    5774 
    5775 #if VMA_RECORDING_ENABLED
    5776  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    5777 #endif
    5778 
    5779  void GetBufferMemoryRequirements(
    5780  VkBuffer hBuffer,
    5781  VkMemoryRequirements& memReq,
    5782  bool& requiresDedicatedAllocation,
    5783  bool& prefersDedicatedAllocation) const;
    5784  void GetImageMemoryRequirements(
    5785  VkImage hImage,
    5786  VkMemoryRequirements& memReq,
    5787  bool& requiresDedicatedAllocation,
    5788  bool& prefersDedicatedAllocation) const;
    5789 
    5790  // Main allocation function.
    5791  VkResult AllocateMemory(
    5792  const VkMemoryRequirements& vkMemReq,
    5793  bool requiresDedicatedAllocation,
    5794  bool prefersDedicatedAllocation,
    5795  VkBuffer dedicatedBuffer,
    5796  VkImage dedicatedImage,
    5797  const VmaAllocationCreateInfo& createInfo,
    5798  VmaSuballocationType suballocType,
    5799  VmaAllocation* pAllocation);
    5800 
    5801  // Main deallocation function.
    5802  void FreeMemory(const VmaAllocation allocation);
    5803 
    5804  VkResult ResizeAllocation(
    5805  const VmaAllocation alloc,
    5806  VkDeviceSize newSize);
    5807 
    5808  void CalculateStats(VmaStats* pStats);
    5809 
    5810 #if VMA_STATS_STRING_ENABLED
    5811  void PrintDetailedMap(class VmaJsonWriter& json);
    5812 #endif
    5813 
    5814  VkResult Defragment(
    5815  VmaAllocation* pAllocations,
    5816  size_t allocationCount,
    5817  VkBool32* pAllocationsChanged,
    5818  const VmaDefragmentationInfo* pDefragmentationInfo,
    5819  VmaDefragmentationStats* pDefragmentationStats);
    5820 
    5821  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    5822  bool TouchAllocation(VmaAllocation hAllocation);
    5823 
    5824  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    5825  void DestroyPool(VmaPool pool);
    5826  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    5827 
    5828  void SetCurrentFrameIndex(uint32_t frameIndex);
    5829  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    5830 
    5831  void MakePoolAllocationsLost(
    5832  VmaPool hPool,
    5833  size_t* pLostAllocationCount);
    5834  VkResult CheckPoolCorruption(VmaPool hPool);
    5835  VkResult CheckCorruption(uint32_t memoryTypeBits);
    5836 
    5837  void CreateLostAllocation(VmaAllocation* pAllocation);
    5838 
    5839  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    5840  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    5841 
    5842  VkResult Map(VmaAllocation hAllocation, void** ppData);
    5843  void Unmap(VmaAllocation hAllocation);
    5844 
    5845  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    5846  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    5847 
    5848  void FlushOrInvalidateAllocation(
    5849  VmaAllocation hAllocation,
    5850  VkDeviceSize offset, VkDeviceSize size,
    5851  VMA_CACHE_OPERATION op);
    5852 
    5853  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    5854 
    5855 private:
    5856  VkDeviceSize m_PreferredLargeHeapBlockSize;
    5857 
    5858  VkPhysicalDevice m_PhysicalDevice;
    5859  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    5860 
    5861  VMA_MUTEX m_PoolsMutex;
    5862  // Protected by m_PoolsMutex. Sorted by pointer value.
    5863  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    5864  uint32_t m_NextPoolId;
    5865 
    5866  VmaVulkanFunctions m_VulkanFunctions;
    5867 
    5868 #if VMA_RECORDING_ENABLED
    5869  VmaRecorder* m_pRecorder;
    5870 #endif
    5871 
    5872  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    5873 
    5874  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    5875 
    5876  VkResult AllocateMemoryOfType(
    5877  VkDeviceSize size,
    5878  VkDeviceSize alignment,
    5879  bool dedicatedAllocation,
    5880  VkBuffer dedicatedBuffer,
    5881  VkImage dedicatedImage,
    5882  const VmaAllocationCreateInfo& createInfo,
    5883  uint32_t memTypeIndex,
    5884  VmaSuballocationType suballocType,
    5885  VmaAllocation* pAllocation);
    5886 
    5887  // Allocates and registers new VkDeviceMemory specifically for single allocation.
    5888  VkResult AllocateDedicatedMemory(
    5889  VkDeviceSize size,
    5890  VmaSuballocationType suballocType,
    5891  uint32_t memTypeIndex,
    5892  bool map,
    5893  bool isUserDataString,
    5894  void* pUserData,
    5895  VkBuffer dedicatedBuffer,
    5896  VkImage dedicatedImage,
    5897  VmaAllocation* pAllocation);
    5898 
    5899  // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    5900  void FreeDedicatedMemory(VmaAllocation allocation);
    5901 };
    5902 
    5904 // Memory allocation #2 after VmaAllocator_T definition
    5905 
    5906 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5907 {
    5908  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5909 }
    5910 
    5911 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5912 {
    5913  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5914 }
    5915 
    5916 template<typename T>
    5917 static T* VmaAllocate(VmaAllocator hAllocator)
    5918 {
    5919  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5920 }
    5921 
    5922 template<typename T>
    5923 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5924 {
    5925  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5926 }
    5927 
    5928 template<typename T>
    5929 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5930 {
    5931  if(ptr != VMA_NULL)
    5932  {
    5933  ptr->~T();
    5934  VmaFree(hAllocator, ptr);
    5935  }
    5936 }
    5937 
    5938 template<typename T>
    5939 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5940 {
    5941  if(ptr != VMA_NULL)
    5942  {
    5943  for(size_t i = count; i--; )
    5944  ptr[i].~T();
    5945  VmaFree(hAllocator, ptr);
    5946  }
    5947 }
    5948 
    5950 // VmaStringBuilder
    5951 
    5952 #if VMA_STATS_STRING_ENABLED
    5953 
// Growable character buffer used to build the statistics string without
// std::string, so that all storage goes through the allocator's callbacks.
class VmaStringBuilder
{
public:
    // Internal vector allocates via the allocator's allocation callbacks.
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // NOTE: the returned buffer is not null-terminated; use GetLength().
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5971 
    5972 void VmaStringBuilder::Add(const char* pStr)
    5973 {
    5974  const size_t strLen = strlen(pStr);
    5975  if(strLen > 0)
    5976  {
    5977  const size_t oldCount = m_Data.size();
    5978  m_Data.resize(oldCount + strLen);
    5979  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5980  }
    5981 }
    5982 
    5983 void VmaStringBuilder::AddNumber(uint32_t num)
    5984 {
    5985  char buf[11];
    5986  VmaUint32ToStr(buf, sizeof(buf), num);
    5987  Add(buf);
    5988 }
    5989 
    5990 void VmaStringBuilder::AddNumber(uint64_t num)
    5991 {
    5992  char buf[21];
    5993  VmaUint64ToStr(buf, sizeof(buf), num);
    5994  Add(buf);
    5995 }
    5996 
    5997 void VmaStringBuilder::AddPointer(const void* ptr)
    5998 {
    5999  char buf[21];
    6000  VmaPtrToStr(buf, sizeof(buf), ptr);
    6001  Add(buf);
    6002 }
    6003 
    6004 #endif // #if VMA_STATS_STRING_ENABLED
    6005 
    6007 // VmaJsonWriter
    6008 
    6009 #if VMA_STATS_STRING_ENABLED
    6010 
// Incremental JSON writer used to produce the detailed statistics string.
// Keeps a stack of currently open objects/arrays so separators and
// indentation are emitted correctly; asserts on malformed usage
// (e.g. a non-string key inside an object).
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine == true suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value, escaping special characters.
    void WriteString(const char* pStr);
    // Begin/Continue/EndString compose one string value from multiple pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Values written so far in this collection; inside an object,
        // keys count as values too (even index = key, odd = value).
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of currently open collections, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString() and EndString().
    bool m_InsideString;

    // Emits separator/indent before a new value; asserts object keys are strings.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};

// String emitted once per nesting level when pretty-printing.
const char* const VmaJsonWriter::INDENT = " ";
    6061 
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

VmaJsonWriter::~VmaJsonWriter()
{
    // All strings and collections must be closed before destruction.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6074 
    6075 void VmaJsonWriter::BeginObject(bool singleLine)
    6076 {
    6077  VMA_ASSERT(!m_InsideString);
    6078 
    6079  BeginValue(false);
    6080  m_SB.Add('{');
    6081 
    6082  StackItem item;
    6083  item.type = COLLECTION_TYPE_OBJECT;
    6084  item.valueCount = 0;
    6085  item.singleLineMode = singleLine;
    6086  m_Stack.push_back(item);
    6087 }
    6088 
    6089 void VmaJsonWriter::EndObject()
    6090 {
    6091  VMA_ASSERT(!m_InsideString);
    6092 
    6093  WriteIndent(true);
    6094  m_SB.Add('}');
    6095 
    6096  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6097  m_Stack.pop_back();
    6098 }
    6099 
    6100 void VmaJsonWriter::BeginArray(bool singleLine)
    6101 {
    6102  VMA_ASSERT(!m_InsideString);
    6103 
    6104  BeginValue(false);
    6105  m_SB.Add('[');
    6106 
    6107  StackItem item;
    6108  item.type = COLLECTION_TYPE_ARRAY;
    6109  item.valueCount = 0;
    6110  item.singleLineMode = singleLine;
    6111  m_Stack.push_back(item);
    6112 }
    6113 
    6114 void VmaJsonWriter::EndArray()
    6115 {
    6116  VMA_ASSERT(!m_InsideString);
    6117 
    6118  WriteIndent(true);
    6119  m_SB.Add(']');
    6120 
    6121  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6122  m_Stack.pop_back();
    6123 }
    6124 
// Writes a complete, escaped JSON string value in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6130 
    6131 void VmaJsonWriter::BeginString(const char* pStr)
    6132 {
    6133  VMA_ASSERT(!m_InsideString);
    6134 
    6135  BeginValue(true);
    6136  m_SB.Add('"');
    6137  m_InsideString = true;
    6138  if(pStr != VMA_NULL && pStr[0] != '\0')
    6139  {
    6140  ContinueString(pStr);
    6141  }
    6142 }
    6143 
    6144 void VmaJsonWriter::ContinueString(const char* pStr)
    6145 {
    6146  VMA_ASSERT(m_InsideString);
    6147 
    6148  const size_t strLen = strlen(pStr);
    6149  for(size_t i = 0; i < strLen; ++i)
    6150  {
    6151  char ch = pStr[i];
    6152  if(ch == '\\')
    6153  {
    6154  m_SB.Add("\\\\");
    6155  }
    6156  else if(ch == '"')
    6157  {
    6158  m_SB.Add("\\\"");
    6159  }
    6160  else if(ch >= 32)
    6161  {
    6162  m_SB.Add(ch);
    6163  }
    6164  else switch(ch)
    6165  {
    6166  case '\b':
    6167  m_SB.Add("\\b");
    6168  break;
    6169  case '\f':
    6170  m_SB.Add("\\f");
    6171  break;
    6172  case '\n':
    6173  m_SB.Add("\\n");
    6174  break;
    6175  case '\r':
    6176  m_SB.Add("\\r");
    6177  break;
    6178  case '\t':
    6179  m_SB.Add("\\t");
    6180  break;
    6181  default:
    6182  VMA_ASSERT(0 && "Character not currently supported.");
    6183  break;
    6184  }
    6185  }
    6186 }
    6187 
// Appends a decimal number to the string value currently being built.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a decimal number to the string value currently being built.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a formatted pointer value to the string currently being built.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6205 
    6206 void VmaJsonWriter::EndString(const char* pStr)
    6207 {
    6208  VMA_ASSERT(m_InsideString);
    6209  if(pStr != VMA_NULL && pStr[0] != '\0')
    6210  {
    6211  ContinueString(pStr);
    6212  }
    6213  m_SB.Add('"');
    6214  m_InsideString = false;
    6215 }
    6216 
    6217 void VmaJsonWriter::WriteNumber(uint32_t n)
    6218 {
    6219  VMA_ASSERT(!m_InsideString);
    6220  BeginValue(false);
    6221  m_SB.AddNumber(n);
    6222 }
    6223 
    6224 void VmaJsonWriter::WriteNumber(uint64_t n)
    6225 {
    6226  VMA_ASSERT(!m_InsideString);
    6227  BeginValue(false);
    6228  m_SB.AddNumber(n);
    6229 }
    6230 
    6231 void VmaJsonWriter::WriteBool(bool b)
    6232 {
    6233  VMA_ASSERT(!m_InsideString);
    6234  BeginValue(false);
    6235  m_SB.Add(b ? "true" : "false");
    6236 }
    6237 
    6238 void VmaJsonWriter::WriteNull()
    6239 {
    6240  VMA_ASSERT(!m_InsideString);
    6241  BeginValue(false);
    6242  m_SB.Add("null");
    6243 }
    6244 
    6245 void VmaJsonWriter::BeginValue(bool isString)
    6246 {
    6247  if(!m_Stack.empty())
    6248  {
    6249  StackItem& currItem = m_Stack.back();
    6250  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6251  currItem.valueCount % 2 == 0)
    6252  {
    6253  VMA_ASSERT(isString);
    6254  }
    6255 
    6256  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6257  currItem.valueCount % 2 != 0)
    6258  {
    6259  m_SB.Add(": ");
    6260  }
    6261  else if(currItem.valueCount > 0)
    6262  {
    6263  m_SB.Add(", ");
    6264  WriteIndent();
    6265  }
    6266  else
    6267  {
    6268  WriteIndent();
    6269  }
    6270  ++currItem.valueCount;
    6271  }
    6272 }
    6273 
    6274 void VmaJsonWriter::WriteIndent(bool oneLess)
    6275 {
    6276  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6277  {
    6278  m_SB.AddNewLine();
    6279 
    6280  size_t count = m_Stack.size();
    6281  if(count > 0 && oneLess)
    6282  {
    6283  --count;
    6284  }
    6285  for(size_t i = 0; i < count; ++i)
    6286  {
    6287  m_SB.Add(INDENT);
    6288  }
    6289  }
    6290 }
    6291 
    6292 #endif // #if VMA_STATS_STRING_ENABLED
    6293 
    6295 
    6296 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6297 {
    6298  if(IsUserDataString())
    6299  {
    6300  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6301 
    6302  FreeUserDataString(hAllocator);
    6303 
    6304  if(pUserData != VMA_NULL)
    6305  {
    6306  const char* const newStrSrc = (char*)pUserData;
    6307  const size_t newStrLen = strlen(newStrSrc);
    6308  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6309  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6310  m_pUserData = newStrDst;
    6311  }
    6312  }
    6313  else
    6314  {
    6315  m_pUserData = pUserData;
    6316  }
    6317 }
    6318 
// Rebinds this block allocation to a (possibly different) memory block and
// offset, e.g. during defragmentation. Transfers the mapping reference count
// so the new block ends up mapped exactly as the old one was.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}

// Updates only the recorded size of the allocation (used by ResizeAllocation).
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    6346 
    6347 VkDeviceSize VmaAllocation_T::GetOffset() const
    6348 {
    6349  switch(m_Type)
    6350  {
    6351  case ALLOCATION_TYPE_BLOCK:
    6352  return m_BlockAllocation.m_Offset;
    6353  case ALLOCATION_TYPE_DEDICATED:
    6354  return 0;
    6355  default:
    6356  VMA_ASSERT(0);
    6357  return 0;
    6358  }
    6359 }
    6360 
    6361 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6362 {
    6363  switch(m_Type)
    6364  {
    6365  case ALLOCATION_TYPE_BLOCK:
    6366  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6367  case ALLOCATION_TYPE_DEDICATED:
    6368  return m_DedicatedAllocation.m_hMemory;
    6369  default:
    6370  VMA_ASSERT(0);
    6371  return VK_NULL_HANDLE;
    6372  }
    6373 }
    6374 
    6375 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6376 {
    6377  switch(m_Type)
    6378  {
    6379  case ALLOCATION_TYPE_BLOCK:
    6380  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6381  case ALLOCATION_TYPE_DEDICATED:
    6382  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6383  default:
    6384  VMA_ASSERT(0);
    6385  return UINT32_MAX;
    6386  }
    6387 }
    6388 
    6389 void* VmaAllocation_T::GetMappedData() const
    6390 {
    6391  switch(m_Type)
    6392  {
    6393  case ALLOCATION_TYPE_BLOCK:
    6394  if(m_MapCount != 0)
    6395  {
    6396  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6397  VMA_ASSERT(pBlockData != VMA_NULL);
    6398  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6399  }
    6400  else
    6401  {
    6402  return VMA_NULL;
    6403  }
    6404  break;
    6405  case ALLOCATION_TYPE_DEDICATED:
    6406  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6407  return m_DedicatedAllocation.m_pMappedData;
    6408  default:
    6409  VMA_ASSERT(0);
    6410  return VMA_NULL;
    6411  }
    6412 }
    6413 
    6414 bool VmaAllocation_T::CanBecomeLost() const
    6415 {
    6416  switch(m_Type)
    6417  {
    6418  case ALLOCATION_TYPE_BLOCK:
    6419  return m_BlockAllocation.m_CanBecomeLost;
    6420  case ALLOCATION_TYPE_DEDICATED:
    6421  return false;
    6422  default:
    6423  VMA_ASSERT(0);
    6424  return false;
    6425  }
    6426 }
    6427 
    6428 VmaPool VmaAllocation_T::GetPool() const
    6429 {
    6430  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6431  return m_BlockAllocation.m_hPool;
    6432 }
    6433 
// Atomically tries to mark this allocation as lost if its last use is older
// than frameInUseCount frames before currentFrameIndex. Returns true on
// success; false if the allocation is still in use (or already lost).
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have reached this state.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still considered in use by recent frames.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed - presumably localLastUseFrameIndex was refreshed with
            // the current value (TODO confirm against CompareExchangeLastUseFrameIndex); retry.
        }
    }
}
    6465 
    6466 #if VMA_STATS_STRING_ENABLED
    6467 
// Correspond to values of enum VmaSuballocationType.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};

// Writes this allocation's properties as key-value pairs into an already-open
// JSON object.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque user data: print its pointer value as a string.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6513 
    6514 #endif
    6515 
    6516 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6517 {
    6518  VMA_ASSERT(IsUserDataString());
    6519  if(m_pUserData != VMA_NULL)
    6520  {
    6521  char* const oldStr = (char*)m_pUserData;
    6522  const size_t oldStrLen = strlen(oldStr);
    6523  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6524  m_pUserData = VMA_NULL;
    6525  }
    6526 }
    6527 
    6528 void VmaAllocation_T::BlockAllocMap()
    6529 {
    6530  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6531 
    6532  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6533  {
    6534  ++m_MapCount;
    6535  }
    6536  else
    6537  {
    6538  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6539  }
    6540 }
    6541 
    6542 void VmaAllocation_T::BlockAllocUnmap()
    6543 {
    6544  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6545 
    6546  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6547  {
    6548  --m_MapCount;
    6549  }
    6550  else
    6551  {
    6552  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6553  }
    6554 }
    6555 
// Maps dedicated device memory with per-allocation reference counting.
// The first call performs vkMapMemory over the whole range; subsequent calls
// return the cached pointer. The counter is limited to 7 bits (0x7F).
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            // Already mapped: reuse cached pointer and bump the counter.
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}

// Decrements the mapping reference count of dedicated memory; calls
// vkUnmapMemory when it drops to zero. Asserts on unbalanced unmap.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6613 
    6614 #if VMA_STATS_STRING_ENABLED
    6615 
// Writes one VmaStatInfo as a JSON object. Min/Avg/Max aggregates are emitted
// only when there is more than one element, since they are redundant otherwise.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6663 
    6664 #endif // #if VMA_STATS_STRING_ENABLED
    6665 
// Comparator ordering free-suballocation iterators by ascending size,
// used for sorting/binary search in m_FreeSuballocationsBySize.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    // Heterogeneous overload: compare an item directly against a size.
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6681 
    6682 
    6684 // class VmaBlockMetadata
    6685 
// Base metadata: stores block size (set later via Init) and the allocation
// callbacks used by derived classes' internal containers.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6691 
    6692 #if VMA_STATS_STRING_ENABLED
    6693 
// Opens the JSON object describing one memory block: summary fields followed
// by an open "Suballocations" array. Must be paired with PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}

// Writes one used suballocation into the open "Suballocations" array.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}

// Writes one free range into the open "Suballocations" array.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}

// Closes the "Suballocations" array and the block's JSON object.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6754 
    6755 #endif // #if VMA_STATS_STRING_ENABLED
    6756 
    6758 // class VmaBlockMetadata_Generic
    6759 
// Generic free-list based metadata. Both internal containers allocate through
// the allocator's allocation callbacks.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}

VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6772 
// Initializes metadata for an empty block: a single free suballocation
// spanning the whole size, registered in the by-size list.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // A whole block is always large enough to be registered in the by-size list.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6792 
// Checks internal consistency of the metadata. VMA_VALIDATE returns false
// from this function on the first failed condition; returns true if all
// invariants hold.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A suballocation is free exactly when it has no VmaAllocation handle.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The owning allocation must agree with the list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // The by-size list must contain only free suballocations, sorted ascending.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6874 
    6875 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6876 {
    6877  if(!m_FreeSuballocationsBySize.empty())
    6878  {
    6879  return m_FreeSuballocationsBySize.back()->size;
    6880  }
    6881  else
    6882  {
    6883  return 0;
    6884  }
    6885 }
    6886 
    6887 bool VmaBlockMetadata_Generic::IsEmpty() const
    6888 {
    6889  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6890 }
    6891 
    6892 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6893 {
    6894  outInfo.blockCount = 1;
    6895 
    6896  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6897  outInfo.allocationCount = rangeCount - m_FreeCount;
    6898  outInfo.unusedRangeCount = m_FreeCount;
    6899 
    6900  outInfo.unusedBytes = m_SumFreeSize;
    6901  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6902 
    6903  outInfo.allocationSizeMin = UINT64_MAX;
    6904  outInfo.allocationSizeMax = 0;
    6905  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6906  outInfo.unusedRangeSizeMax = 0;
    6907 
    6908  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6909  suballocItem != m_Suballocations.cend();
    6910  ++suballocItem)
    6911  {
    6912  const VmaSuballocation& suballoc = *suballocItem;
    6913  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6914  {
    6915  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6916  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6917  }
    6918  else
    6919  {
    6920  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6921  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6922  }
    6923  }
    6924 }
    6925 
    6926 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6927 {
    6928  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6929 
    6930  inoutStats.size += GetSize();
    6931  inoutStats.unusedSize += m_SumFreeSize;
    6932  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6933  inoutStats.unusedRangeCount += m_FreeCount;
    6934  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6935 }
    6936 
    6937 #if VMA_STATS_STRING_ENABLED
    6938 
    6939 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6940 {
    6941  PrintDetailedMap_Begin(json,
    6942  m_SumFreeSize, // unusedBytes
    6943  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6944  m_FreeCount); // unusedRangeCount
    6945 
    6946  size_t i = 0;
    6947  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6948  suballocItem != m_Suballocations.cend();
    6949  ++suballocItem, ++i)
    6950  {
    6951  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6952  {
    6953  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6954  }
    6955  else
    6956  {
    6957  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6958  }
    6959  }
    6960 
    6961  PrintDetailedMap_End(json);
    6962 }
    6963 
    6964 #endif // #if VMA_STATS_STRING_ENABLED
    6965 
    6966 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6967  uint32_t currentFrameIndex,
    6968  uint32_t frameInUseCount,
    6969  VkDeviceSize bufferImageGranularity,
    6970  VkDeviceSize allocSize,
    6971  VkDeviceSize allocAlignment,
    6972  bool upperAddress,
    6973  VmaSuballocationType allocType,
    6974  bool canMakeOtherLost,
    6975  uint32_t strategy,
    6976  VmaAllocationRequest* pAllocationRequest)
    6977 {
    6978  VMA_ASSERT(allocSize > 0);
    6979  VMA_ASSERT(!upperAddress);
    6980  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6981  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6982  VMA_HEAVY_ASSERT(Validate());
    6983 
    6984  // There is not enough total free space in this block to fullfill the request: Early return.
    6985  if(canMakeOtherLost == false &&
    6986  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6987  {
    6988  return false;
    6989  }
    6990 
    6991  // New algorithm, efficiently searching freeSuballocationsBySize.
    6992  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6993  if(freeSuballocCount > 0)
    6994  {
    6996  {
    6997  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6998  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6999  m_FreeSuballocationsBySize.data(),
    7000  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7001  allocSize + 2 * VMA_DEBUG_MARGIN,
    7002  VmaSuballocationItemSizeLess());
    7003  size_t index = it - m_FreeSuballocationsBySize.data();
    7004  for(; index < freeSuballocCount; ++index)
    7005  {
    7006  if(CheckAllocation(
    7007  currentFrameIndex,
    7008  frameInUseCount,
    7009  bufferImageGranularity,
    7010  allocSize,
    7011  allocAlignment,
    7012  allocType,
    7013  m_FreeSuballocationsBySize[index],
    7014  false, // canMakeOtherLost
    7015  &pAllocationRequest->offset,
    7016  &pAllocationRequest->itemsToMakeLostCount,
    7017  &pAllocationRequest->sumFreeSize,
    7018  &pAllocationRequest->sumItemSize))
    7019  {
    7020  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7021  return true;
    7022  }
    7023  }
    7024  }
    7025  else // WORST_FIT, FIRST_FIT
    7026  {
    7027  // Search staring from biggest suballocations.
    7028  for(size_t index = freeSuballocCount; index--; )
    7029  {
    7030  if(CheckAllocation(
    7031  currentFrameIndex,
    7032  frameInUseCount,
    7033  bufferImageGranularity,
    7034  allocSize,
    7035  allocAlignment,
    7036  allocType,
    7037  m_FreeSuballocationsBySize[index],
    7038  false, // canMakeOtherLost
    7039  &pAllocationRequest->offset,
    7040  &pAllocationRequest->itemsToMakeLostCount,
    7041  &pAllocationRequest->sumFreeSize,
    7042  &pAllocationRequest->sumItemSize))
    7043  {
    7044  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7045  return true;
    7046  }
    7047  }
    7048  }
    7049  }
    7050 
    7051  if(canMakeOtherLost)
    7052  {
    7053  // Brute-force algorithm. TODO: Come up with something better.
    7054 
    7055  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7056  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7057 
    7058  VmaAllocationRequest tmpAllocRequest = {};
    7059  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7060  suballocIt != m_Suballocations.end();
    7061  ++suballocIt)
    7062  {
    7063  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7064  suballocIt->hAllocation->CanBecomeLost())
    7065  {
    7066  if(CheckAllocation(
    7067  currentFrameIndex,
    7068  frameInUseCount,
    7069  bufferImageGranularity,
    7070  allocSize,
    7071  allocAlignment,
    7072  allocType,
    7073  suballocIt,
    7074  canMakeOtherLost,
    7075  &tmpAllocRequest.offset,
    7076  &tmpAllocRequest.itemsToMakeLostCount,
    7077  &tmpAllocRequest.sumFreeSize,
    7078  &tmpAllocRequest.sumItemSize))
    7079  {
    7080  tmpAllocRequest.item = suballocIt;
    7081 
    7082  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7084  {
    7085  *pAllocationRequest = tmpAllocRequest;
    7086  }
    7087  }
    7088  }
    7089  }
    7090 
    7091  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7092  {
    7093  return true;
    7094  }
    7095  }
    7096 
    7097  return false;
    7098 }
    7099 
// Fulfills the "make other allocations lost" part of a request previously
// prepared by CreateAllocationRequest(): walks forward from the request's
// starting suballocation, making the required number of existing allocations
// lost and freeing their suballocations. Returns false if any of them cannot
// be made lost (still in use within frameInUseCount frames).
// On success, pAllocationRequest->item points at a free suballocation where
// the new allocation can be placed.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a suballocation that is already free - only used ones
        // count toward itemsToMakeLostCount.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge neighbors; the returned iterator to
            // the resulting free item becomes the new current position.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            // Allocation was used too recently - cannot be made lost.
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7131 
// Makes lost every allocation in this block that can become lost and is old
// enough (per MakeLost's frame check). Returns the number of allocations
// made lost.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge the item with a previous free one;
            // continue iterating from the returned (merged) free item.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    7149 
    7150 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7151 {
    7152  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7153  it != m_Suballocations.end();
    7154  ++it)
    7155  {
    7156  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7157  {
    7158  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7159  {
    7160  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7161  return VK_ERROR_VALIDATION_FAILED_EXT;
    7162  }
    7163  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7164  {
    7165  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7166  return VK_ERROR_VALIDATION_FAILED_EXT;
    7167  }
    7168  }
    7169  }
    7170 
    7171  return VK_SUCCESS;
    7172 }
    7173 
// Commits an allocation request previously obtained from
// CreateAllocationRequest(): converts the chosen free suballocation into a
// used one of allocSize bytes at request.offset, inserts new free
// suballocations for any leftover space before/after it, and updates the
// block totals (m_FreeCount, m_SumFreeSize).
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Upper-address allocation is supported only by the linear algorithm.
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // The consumed free suballocation is gone (-1); each inserted padding
    // range adds one free suballocation back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7239 
    7240 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7241 {
    7242  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7243  suballocItem != m_Suballocations.end();
    7244  ++suballocItem)
    7245  {
    7246  VmaSuballocation& suballoc = *suballocItem;
    7247  if(suballoc.hAllocation == allocation)
    7248  {
    7249  FreeSuballocation(suballocItem);
    7250  VMA_HEAVY_ASSERT(Validate());
    7251  return;
    7252  }
    7253  }
    7254  VMA_ASSERT(0 && "Not found!");
    7255 }
    7256 
    7257 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7258 {
    7259  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7260  suballocItem != m_Suballocations.end();
    7261  ++suballocItem)
    7262  {
    7263  VmaSuballocation& suballoc = *suballocItem;
    7264  if(suballoc.offset == offset)
    7265  {
    7266  FreeSuballocation(suballocItem);
    7267  return;
    7268  }
    7269  }
    7270  VMA_ASSERT(0 && "Not found!");
    7271 }
    7272 
// Tries to change the size of an existing allocation in place.
// Shrinking always succeeds: the freed tail either grows the following free
// suballocation backward or becomes a new free suballocation.
// Growing succeeds only if the immediately following suballocation is free
// and large enough (including VMA_DEBUG_MARGIN); otherwise returns false.
// Returns false (with assert) if the allocation is not found in this block.
// NOTE: the caller updates the VmaAllocation object's size afterwards, which
// is why Validate() cannot be called here.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister/register because changing size invalidates
                        // its position in the by-size vector.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7399 
    7400 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7401 {
    7402  VkDeviceSize lastSize = 0;
    7403  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7404  {
    7405  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7406 
    7407  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7408  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7409  VMA_VALIDATE(it->size >= lastSize);
    7410  lastSize = it->size;
    7411  }
    7412  return true;
    7413 }
    7414 
// Checks whether an allocation of allocSize/allocAlignment/allocType can be
// placed starting at suballocItem. On success fills *pOffset with the final
// aligned offset and returns true.
//
// Two modes:
// - canMakeOtherLost == false: suballocItem must be a single free
//   suballocation large enough by itself (after margins, alignment and
//   bufferImageGranularity adjustments).
// - canMakeOtherLost == true: the candidate range may span suballocItem and
//   following suballocations; used ones must be lost-able and old enough.
//   *itemsToMakeLostCount receives how many would have to be made lost, and
//   *pSumFreeSize / *pSumItemSize the free/used byte totals of the consumed
//   range (used by the caller for cost comparison).
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting suballocation itself contributes either to the free
        // total or (if lost-able and old enough) to the to-be-lost total.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple mode: the allocation must fit entirely inside this single
        // free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7688 
    7689 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7690 {
    7691  VMA_ASSERT(item != m_Suballocations.end());
    7692  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7693 
    7694  VmaSuballocationList::iterator nextItem = item;
    7695  ++nextItem;
    7696  VMA_ASSERT(nextItem != m_Suballocations.end());
    7697  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7698 
    7699  item->size += nextItem->size;
    7700  --m_FreeCount;
    7701  m_Suballocations.erase(nextItem);
    7702 }
    7703 
// Releases a used suballocation: marks it free, updates totals, and merges it
// with adjacent free suballocations so the list never holds two neighboring
// free items. Returns an iterator to the resulting free suballocation, which
// is (re)registered in m_FreeSuballocationsBySize.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Merge forward first: suballocItem absorbs nextItem. The next item must
    // be unregistered from the by-size vector before it is erased.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem absorbs (possibly already-merged) suballocItem; its size
        // changes, so it must be re-registered at its new position.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7755 
    7756 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7757 {
    7758  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7759  VMA_ASSERT(item->size > 0);
    7760 
    7761  // You may want to enable this validation at the beginning or at the end of
    7762  // this function, depending on what do you want to check.
    7763  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7764 
    7765  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7766  {
    7767  if(m_FreeSuballocationsBySize.empty())
    7768  {
    7769  m_FreeSuballocationsBySize.push_back(item);
    7770  }
    7771  else
    7772  {
    7773  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7774  }
    7775  }
    7776 
    7777  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7778 }
    7779 
    7780 
// Removes a free suballocation from m_FreeSuballocationsBySize (the
// size-sorted index of free ranges). The entry must be present if its size
// is at or above the registration threshold; otherwise it was never
// registered and there is nothing to remove.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary-search for the first entry whose size is not less than
        // item->size; the exact iterator must then lie within the contiguous
        // run of equal-size entries starting there.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        // Linear scan over the equal-size run to find the matching iterator.
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Once the size changes we have left the run without a match —
            // the registry is inconsistent.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7813 
    7815 // class VmaBlockMetadata_Linear
    7816 
// Constructs empty linear-algorithm metadata: both suballocation vectors
// empty, vector 0 acting as the "1st" vector, no second-vector mode engaged,
// and all null-item counters zeroed. m_SumFreeSize stays 0 until Init().
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7829 
// Members clean up via their own destructors; nothing extra to release here.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7833 
// Initializes metadata for a block of the given size. The whole block starts
// out free, so the free-size accumulator equals the block size.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7839 
// Heavy consistency check for the linear metadata. Walks both suballocation
// vectors in address order, verifying: null-item bookkeeping counters match
// the actual null entries, offsets are monotonically increasing with at least
// VMA_DEBUG_MARGIN between allocations, each live allocation agrees with its
// VmaAllocation handle, and m_SumFreeSize equals block size minus used bytes.
// Returns true on success; VMA_VALIDATE returns false out of this function
// on the first violated condition.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is non-empty exactly when a second-vector mode is engaged.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    // A ring buffer cannot exist with an empty 1st vector.
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // Running lower bound for the next valid offset (debug margin included).
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies addresses BELOW the 1st,
    // so it is walked first, in forward (ascending-address) order.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                // Allocation handle must mirror the suballocation's placement.
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading null items of the 1st vector must all be genuinely free.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i starts at m_1stNullItemsBeginCount, so the left
        // operand is always true here and this check is currently a no-op.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows DOWN from the block end, so
    // it is walked in reverse index order to proceed in ascending addresses.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7966 
    7967 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7968 {
    7969  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7970  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7971 }
    7972 
// Returns the largest contiguous free range that a NEW allocation could use,
// which depends on the current second-vector mode.
VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
{
    const VkDeviceSize size = GetSize();

    /*
    We don't consider gaps inside allocation vectors with freed allocations because
    they are not suitable for reuse in linear allocator. We consider only space that
    is available for new allocations.
    */
    if(IsEmpty())
    {
        return size;
    }

    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();

    switch(m_2ndVectorMode)
    {
    case SECOND_VECTOR_EMPTY:
        /*
        Available space is after end of 1st, as well as before beginning of 1st (which
        would make it a ring buffer).
        */
        {
            const size_t suballocations1stCount = suballocations1st.size();
            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
            // First live item (skipping leading nulls) bounds the front gap;
            // last item bounds the tail gap.
            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
            return VMA_MAX(
                firstSuballoc.offset,
                size - (lastSuballoc.offset + lastSuballoc.size));
        }
        break;

    case SECOND_VECTOR_RING_BUFFER:
        /*
        Available space is only between end of 2nd and beginning of 1st.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
        }
        break;

    case SECOND_VECTOR_DOUBLE_STACK:
        /*
        Available space is only between end of 1st and top of 2nd.
        */
        {
            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
            // 2nd grows down from the block end, so back() is its lowest offset.
            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
        }
        break;

    default:
        VMA_ASSERT(0);
        return 0;
    }
}
    8036 
    8037 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8038 {
    8039  const VkDeviceSize size = GetSize();
    8040  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8041  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8042  const size_t suballoc1stCount = suballocations1st.size();
    8043  const size_t suballoc2ndCount = suballocations2nd.size();
    8044 
    8045  outInfo.blockCount = 1;
    8046  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8047  outInfo.unusedRangeCount = 0;
    8048  outInfo.usedBytes = 0;
    8049  outInfo.allocationSizeMin = UINT64_MAX;
    8050  outInfo.allocationSizeMax = 0;
    8051  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8052  outInfo.unusedRangeSizeMax = 0;
    8053 
    8054  VkDeviceSize lastOffset = 0;
    8055 
    8056  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8057  {
    8058  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8059  size_t nextAlloc2ndIndex = 0;
    8060  while(lastOffset < freeSpace2ndTo1stEnd)
    8061  {
    8062  // Find next non-null allocation or move nextAllocIndex to the end.
    8063  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8064  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8065  {
    8066  ++nextAlloc2ndIndex;
    8067  }
    8068 
    8069  // Found non-null allocation.
    8070  if(nextAlloc2ndIndex < suballoc2ndCount)
    8071  {
    8072  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8073 
    8074  // 1. Process free space before this allocation.
    8075  if(lastOffset < suballoc.offset)
    8076  {
    8077  // There is free space from lastOffset to suballoc.offset.
    8078  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8079  ++outInfo.unusedRangeCount;
    8080  outInfo.unusedBytes += unusedRangeSize;
    8081  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8082  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8083  }
    8084 
    8085  // 2. Process this allocation.
    8086  // There is allocation with suballoc.offset, suballoc.size.
    8087  outInfo.usedBytes += suballoc.size;
    8088  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8089  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8090 
    8091  // 3. Prepare for next iteration.
    8092  lastOffset = suballoc.offset + suballoc.size;
    8093  ++nextAlloc2ndIndex;
    8094  }
    8095  // We are at the end.
    8096  else
    8097  {
    8098  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8099  if(lastOffset < freeSpace2ndTo1stEnd)
    8100  {
    8101  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8102  ++outInfo.unusedRangeCount;
    8103  outInfo.unusedBytes += unusedRangeSize;
    8104  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8105  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8106  }
    8107 
    8108  // End of loop.
    8109  lastOffset = freeSpace2ndTo1stEnd;
    8110  }
    8111  }
    8112  }
    8113 
    8114  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8115  const VkDeviceSize freeSpace1stTo2ndEnd =
    8116  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8117  while(lastOffset < freeSpace1stTo2ndEnd)
    8118  {
    8119  // Find next non-null allocation or move nextAllocIndex to the end.
    8120  while(nextAlloc1stIndex < suballoc1stCount &&
    8121  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8122  {
    8123  ++nextAlloc1stIndex;
    8124  }
    8125 
    8126  // Found non-null allocation.
    8127  if(nextAlloc1stIndex < suballoc1stCount)
    8128  {
    8129  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8130 
    8131  // 1. Process free space before this allocation.
    8132  if(lastOffset < suballoc.offset)
    8133  {
    8134  // There is free space from lastOffset to suballoc.offset.
    8135  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8136  ++outInfo.unusedRangeCount;
    8137  outInfo.unusedBytes += unusedRangeSize;
    8138  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8139  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8140  }
    8141 
    8142  // 2. Process this allocation.
    8143  // There is allocation with suballoc.offset, suballoc.size.
    8144  outInfo.usedBytes += suballoc.size;
    8145  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8146  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8147 
    8148  // 3. Prepare for next iteration.
    8149  lastOffset = suballoc.offset + suballoc.size;
    8150  ++nextAlloc1stIndex;
    8151  }
    8152  // We are at the end.
    8153  else
    8154  {
    8155  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8156  if(lastOffset < freeSpace1stTo2ndEnd)
    8157  {
    8158  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8159  ++outInfo.unusedRangeCount;
    8160  outInfo.unusedBytes += unusedRangeSize;
    8161  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8162  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8163  }
    8164 
    8165  // End of loop.
    8166  lastOffset = freeSpace1stTo2ndEnd;
    8167  }
    8168  }
    8169 
    8170  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8171  {
    8172  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8173  while(lastOffset < size)
    8174  {
    8175  // Find next non-null allocation or move nextAllocIndex to the end.
    8176  while(nextAlloc2ndIndex != SIZE_MAX &&
    8177  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8178  {
    8179  --nextAlloc2ndIndex;
    8180  }
    8181 
    8182  // Found non-null allocation.
    8183  if(nextAlloc2ndIndex != SIZE_MAX)
    8184  {
    8185  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8186 
    8187  // 1. Process free space before this allocation.
    8188  if(lastOffset < suballoc.offset)
    8189  {
    8190  // There is free space from lastOffset to suballoc.offset.
    8191  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8192  ++outInfo.unusedRangeCount;
    8193  outInfo.unusedBytes += unusedRangeSize;
    8194  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8195  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8196  }
    8197 
    8198  // 2. Process this allocation.
    8199  // There is allocation with suballoc.offset, suballoc.size.
    8200  outInfo.usedBytes += suballoc.size;
    8201  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8202  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8203 
    8204  // 3. Prepare for next iteration.
    8205  lastOffset = suballoc.offset + suballoc.size;
    8206  --nextAlloc2ndIndex;
    8207  }
    8208  // We are at the end.
    8209  else
    8210  {
    8211  // There is free space from lastOffset to size.
    8212  if(lastOffset < size)
    8213  {
    8214  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8215  ++outInfo.unusedRangeCount;
    8216  outInfo.unusedBytes += unusedRangeSize;
    8217  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8218  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8219  }
    8220 
    8221  // End of loop.
    8222  lastOffset = size;
    8223  }
    8224  }
    8225  }
    8226 
    8227  outInfo.unusedBytes = size - outInfo.usedBytes;
    8228 }
    8229 
    8230 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8231 {
    8232  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8233  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8234  const VkDeviceSize size = GetSize();
    8235  const size_t suballoc1stCount = suballocations1st.size();
    8236  const size_t suballoc2ndCount = suballocations2nd.size();
    8237 
    8238  inoutStats.size += size;
    8239 
    8240  VkDeviceSize lastOffset = 0;
    8241 
    8242  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8243  {
    8244  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8245  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8246  while(lastOffset < freeSpace2ndTo1stEnd)
    8247  {
    8248  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8249  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8250  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8251  {
    8252  ++nextAlloc2ndIndex;
    8253  }
    8254 
    8255  // Found non-null allocation.
    8256  if(nextAlloc2ndIndex < suballoc2ndCount)
    8257  {
    8258  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8259 
    8260  // 1. Process free space before this allocation.
    8261  if(lastOffset < suballoc.offset)
    8262  {
    8263  // There is free space from lastOffset to suballoc.offset.
    8264  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8265  inoutStats.unusedSize += unusedRangeSize;
    8266  ++inoutStats.unusedRangeCount;
    8267  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8268  }
    8269 
    8270  // 2. Process this allocation.
    8271  // There is allocation with suballoc.offset, suballoc.size.
    8272  ++inoutStats.allocationCount;
    8273 
    8274  // 3. Prepare for next iteration.
    8275  lastOffset = suballoc.offset + suballoc.size;
    8276  ++nextAlloc2ndIndex;
    8277  }
    8278  // We are at the end.
    8279  else
    8280  {
    8281  if(lastOffset < freeSpace2ndTo1stEnd)
    8282  {
    8283  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8284  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8285  inoutStats.unusedSize += unusedRangeSize;
    8286  ++inoutStats.unusedRangeCount;
    8287  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8288  }
    8289 
    8290  // End of loop.
    8291  lastOffset = freeSpace2ndTo1stEnd;
    8292  }
    8293  }
    8294  }
    8295 
    8296  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8297  const VkDeviceSize freeSpace1stTo2ndEnd =
    8298  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8299  while(lastOffset < freeSpace1stTo2ndEnd)
    8300  {
    8301  // Find next non-null allocation or move nextAllocIndex to the end.
    8302  while(nextAlloc1stIndex < suballoc1stCount &&
    8303  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8304  {
    8305  ++nextAlloc1stIndex;
    8306  }
    8307 
    8308  // Found non-null allocation.
    8309  if(nextAlloc1stIndex < suballoc1stCount)
    8310  {
    8311  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8312 
    8313  // 1. Process free space before this allocation.
    8314  if(lastOffset < suballoc.offset)
    8315  {
    8316  // There is free space from lastOffset to suballoc.offset.
    8317  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8318  inoutStats.unusedSize += unusedRangeSize;
    8319  ++inoutStats.unusedRangeCount;
    8320  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8321  }
    8322 
    8323  // 2. Process this allocation.
    8324  // There is allocation with suballoc.offset, suballoc.size.
    8325  ++inoutStats.allocationCount;
    8326 
    8327  // 3. Prepare for next iteration.
    8328  lastOffset = suballoc.offset + suballoc.size;
    8329  ++nextAlloc1stIndex;
    8330  }
    8331  // We are at the end.
    8332  else
    8333  {
    8334  if(lastOffset < freeSpace1stTo2ndEnd)
    8335  {
    8336  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8337  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8338  inoutStats.unusedSize += unusedRangeSize;
    8339  ++inoutStats.unusedRangeCount;
    8340  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8341  }
    8342 
    8343  // End of loop.
    8344  lastOffset = freeSpace1stTo2ndEnd;
    8345  }
    8346  }
    8347 
    8348  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8349  {
    8350  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8351  while(lastOffset < size)
    8352  {
    8353  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8354  while(nextAlloc2ndIndex != SIZE_MAX &&
    8355  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8356  {
    8357  --nextAlloc2ndIndex;
    8358  }
    8359 
    8360  // Found non-null allocation.
    8361  if(nextAlloc2ndIndex != SIZE_MAX)
    8362  {
    8363  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8364 
    8365  // 1. Process free space before this allocation.
    8366  if(lastOffset < suballoc.offset)
    8367  {
    8368  // There is free space from lastOffset to suballoc.offset.
    8369  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8370  inoutStats.unusedSize += unusedRangeSize;
    8371  ++inoutStats.unusedRangeCount;
    8372  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8373  }
    8374 
    8375  // 2. Process this allocation.
    8376  // There is allocation with suballoc.offset, suballoc.size.
    8377  ++inoutStats.allocationCount;
    8378 
    8379  // 3. Prepare for next iteration.
    8380  lastOffset = suballoc.offset + suballoc.size;
    8381  --nextAlloc2ndIndex;
    8382  }
    8383  // We are at the end.
    8384  else
    8385  {
    8386  if(lastOffset < size)
    8387  {
    8388  // There is free space from lastOffset to size.
    8389  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8390  inoutStats.unusedSize += unusedRangeSize;
    8391  ++inoutStats.unusedRangeCount;
    8392  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8393  }
    8394 
    8395  // End of loop.
    8396  lastOffset = size;
    8397  }
    8398  }
    8399  }
    8400 }
    8401 
    8402 #if VMA_STATS_STRING_ENABLED
    8403 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8404 {
    8405  const VkDeviceSize size = GetSize();
    8406  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8407  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8408  const size_t suballoc1stCount = suballocations1st.size();
    8409  const size_t suballoc2ndCount = suballocations2nd.size();
    8410 
    8411  // FIRST PASS
    8412 
    8413  size_t unusedRangeCount = 0;
    8414  VkDeviceSize usedBytes = 0;
    8415 
    8416  VkDeviceSize lastOffset = 0;
    8417 
    8418  size_t alloc2ndCount = 0;
    8419  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8420  {
    8421  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8422  size_t nextAlloc2ndIndex = 0;
    8423  while(lastOffset < freeSpace2ndTo1stEnd)
    8424  {
    8425  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8426  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8427  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8428  {
    8429  ++nextAlloc2ndIndex;
    8430  }
    8431 
    8432  // Found non-null allocation.
    8433  if(nextAlloc2ndIndex < suballoc2ndCount)
    8434  {
    8435  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8436 
    8437  // 1. Process free space before this allocation.
    8438  if(lastOffset < suballoc.offset)
    8439  {
    8440  // There is free space from lastOffset to suballoc.offset.
    8441  ++unusedRangeCount;
    8442  }
    8443 
    8444  // 2. Process this allocation.
    8445  // There is allocation with suballoc.offset, suballoc.size.
    8446  ++alloc2ndCount;
    8447  usedBytes += suballoc.size;
    8448 
    8449  // 3. Prepare for next iteration.
    8450  lastOffset = suballoc.offset + suballoc.size;
    8451  ++nextAlloc2ndIndex;
    8452  }
    8453  // We are at the end.
    8454  else
    8455  {
    8456  if(lastOffset < freeSpace2ndTo1stEnd)
    8457  {
    8458  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8459  ++unusedRangeCount;
    8460  }
    8461 
    8462  // End of loop.
    8463  lastOffset = freeSpace2ndTo1stEnd;
    8464  }
    8465  }
    8466  }
    8467 
    8468  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8469  size_t alloc1stCount = 0;
    8470  const VkDeviceSize freeSpace1stTo2ndEnd =
    8471  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8472  while(lastOffset < freeSpace1stTo2ndEnd)
    8473  {
    8474  // Find next non-null allocation or move nextAllocIndex to the end.
    8475  while(nextAlloc1stIndex < suballoc1stCount &&
    8476  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8477  {
    8478  ++nextAlloc1stIndex;
    8479  }
    8480 
    8481  // Found non-null allocation.
    8482  if(nextAlloc1stIndex < suballoc1stCount)
    8483  {
    8484  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8485 
    8486  // 1. Process free space before this allocation.
    8487  if(lastOffset < suballoc.offset)
    8488  {
    8489  // There is free space from lastOffset to suballoc.offset.
    8490  ++unusedRangeCount;
    8491  }
    8492 
    8493  // 2. Process this allocation.
    8494  // There is allocation with suballoc.offset, suballoc.size.
    8495  ++alloc1stCount;
    8496  usedBytes += suballoc.size;
    8497 
    8498  // 3. Prepare for next iteration.
    8499  lastOffset = suballoc.offset + suballoc.size;
    8500  ++nextAlloc1stIndex;
    8501  }
    8502  // We are at the end.
    8503  else
    8504  {
    8505  if(lastOffset < size)
    8506  {
    8507  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8508  ++unusedRangeCount;
    8509  }
    8510 
    8511  // End of loop.
    8512  lastOffset = freeSpace1stTo2ndEnd;
    8513  }
    8514  }
    8515 
    8516  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8517  {
    8518  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8519  while(lastOffset < size)
    8520  {
    8521  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8522  while(nextAlloc2ndIndex != SIZE_MAX &&
    8523  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8524  {
    8525  --nextAlloc2ndIndex;
    8526  }
    8527 
    8528  // Found non-null allocation.
    8529  if(nextAlloc2ndIndex != SIZE_MAX)
    8530  {
    8531  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8532 
    8533  // 1. Process free space before this allocation.
    8534  if(lastOffset < suballoc.offset)
    8535  {
    8536  // There is free space from lastOffset to suballoc.offset.
    8537  ++unusedRangeCount;
    8538  }
    8539 
    8540  // 2. Process this allocation.
    8541  // There is allocation with suballoc.offset, suballoc.size.
    8542  ++alloc2ndCount;
    8543  usedBytes += suballoc.size;
    8544 
    8545  // 3. Prepare for next iteration.
    8546  lastOffset = suballoc.offset + suballoc.size;
    8547  --nextAlloc2ndIndex;
    8548  }
    8549  // We are at the end.
    8550  else
    8551  {
    8552  if(lastOffset < size)
    8553  {
    8554  // There is free space from lastOffset to size.
    8555  ++unusedRangeCount;
    8556  }
    8557 
    8558  // End of loop.
    8559  lastOffset = size;
    8560  }
    8561  }
    8562  }
    8563 
    8564  const VkDeviceSize unusedBytes = size - usedBytes;
    8565  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8566 
    8567  // SECOND PASS
    8568  lastOffset = 0;
    8569 
    8570  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8571  {
    8572  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8573  size_t nextAlloc2ndIndex = 0;
    8574  while(lastOffset < freeSpace2ndTo1stEnd)
    8575  {
    8576  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8577  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8578  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8579  {
    8580  ++nextAlloc2ndIndex;
    8581  }
    8582 
    8583  // Found non-null allocation.
    8584  if(nextAlloc2ndIndex < suballoc2ndCount)
    8585  {
    8586  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8587 
    8588  // 1. Process free space before this allocation.
    8589  if(lastOffset < suballoc.offset)
    8590  {
    8591  // There is free space from lastOffset to suballoc.offset.
    8592  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8593  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8594  }
    8595 
    8596  // 2. Process this allocation.
    8597  // There is allocation with suballoc.offset, suballoc.size.
    8598  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8599 
    8600  // 3. Prepare for next iteration.
    8601  lastOffset = suballoc.offset + suballoc.size;
    8602  ++nextAlloc2ndIndex;
    8603  }
    8604  // We are at the end.
    8605  else
    8606  {
    8607  if(lastOffset < freeSpace2ndTo1stEnd)
    8608  {
    8609  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8610  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8611  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8612  }
    8613 
    8614  // End of loop.
    8615  lastOffset = freeSpace2ndTo1stEnd;
    8616  }
    8617  }
    8618  }
    8619 
    8620  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8621  while(lastOffset < freeSpace1stTo2ndEnd)
    8622  {
    8623  // Find next non-null allocation or move nextAllocIndex to the end.
    8624  while(nextAlloc1stIndex < suballoc1stCount &&
    8625  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8626  {
    8627  ++nextAlloc1stIndex;
    8628  }
    8629 
    8630  // Found non-null allocation.
    8631  if(nextAlloc1stIndex < suballoc1stCount)
    8632  {
    8633  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8634 
    8635  // 1. Process free space before this allocation.
    8636  if(lastOffset < suballoc.offset)
    8637  {
    8638  // There is free space from lastOffset to suballoc.offset.
    8639  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8640  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8641  }
    8642 
    8643  // 2. Process this allocation.
    8644  // There is allocation with suballoc.offset, suballoc.size.
    8645  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8646 
    8647  // 3. Prepare for next iteration.
    8648  lastOffset = suballoc.offset + suballoc.size;
    8649  ++nextAlloc1stIndex;
    8650  }
    8651  // We are at the end.
    8652  else
    8653  {
    8654  if(lastOffset < freeSpace1stTo2ndEnd)
    8655  {
    8656  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8657  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8658  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8659  }
    8660 
    8661  // End of loop.
    8662  lastOffset = freeSpace1stTo2ndEnd;
    8663  }
    8664  }
    8665 
    8666  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8667  {
    8668  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8669  while(lastOffset < size)
    8670  {
    8671  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8672  while(nextAlloc2ndIndex != SIZE_MAX &&
    8673  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8674  {
    8675  --nextAlloc2ndIndex;
    8676  }
    8677 
    8678  // Found non-null allocation.
    8679  if(nextAlloc2ndIndex != SIZE_MAX)
    8680  {
    8681  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8682 
    8683  // 1. Process free space before this allocation.
    8684  if(lastOffset < suballoc.offset)
    8685  {
    8686  // There is free space from lastOffset to suballoc.offset.
    8687  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8688  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8689  }
    8690 
    8691  // 2. Process this allocation.
    8692  // There is allocation with suballoc.offset, suballoc.size.
    8693  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8694 
    8695  // 3. Prepare for next iteration.
    8696  lastOffset = suballoc.offset + suballoc.size;
    8697  --nextAlloc2ndIndex;
    8698  }
    8699  // We are at the end.
    8700  else
    8701  {
    8702  if(lastOffset < size)
    8703  {
    8704  // There is free space from lastOffset to size.
    8705  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8706  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8707  }
    8708 
    8709  // End of loop.
    8710  lastOffset = size;
    8711  }
    8712  }
    8713  }
    8714 
    8715  PrintDetailedMap_End(json);
    8716 }
    8717 #endif // #if VMA_STATS_STRING_ENABLED
    8718 
/*
Searches for a place for a new allocation inside this linear block and, on
success, fills *pAllocationRequest and returns true. Returns false if the
allocation cannot fit (possibly after marking candidate allocations as lost,
when canMakeOtherLost is true).

Three placement strategies, chosen by current state:
- upperAddress: push onto the "upper stack" growing down from the end of the
  block (2nd vector in DOUBLE_STACK mode).
- !upperAddress, 2nd vector EMPTY or DOUBLE_STACK: append at the end of the
  1st vector.
- !upperAddress, 2nd vector EMPTY or RING_BUFFER: wrap around and allocate
  after the end of the 2nd vector, before the beginning of the 1st vector.

Note: the `strategy` parameter is not consulted here — the linear algorithm
has only one placement order.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Double-stack and ring-buffer usage of the 2nd vector are mutually exclusive.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            // Upper stack grows downward: new item sits just below the current top.
            resultBaseOffset = lastSuballoc.offset - allocSize;
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end (margin is placed above the allocation
        // because this stack grows down).
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment (downward, since we allocate from the top).
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Iterate 2nd vector from top of the upper stack downward in address space.
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space: the gap between end of 1st vector and the
        // chosen offset must absorb the debug margin.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // Free space ends at the bottom of the upper stack (if in use) or at
            // the end of the whole block.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            // Wrap-around only makes sense when the 1st vector occupies space to wrap before.
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Walk allocations at the head of the 1st vector that overlap the
                // requested range [resultOffset, resultOffset+allocSize+margin) and
                // count how many must be made lost; fail if any cannot be.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            // NOTE(review): strict '<' against `size` vs '<=' against the next
            // allocation's offset looks asymmetric — confirm the end-of-block case
            // is intentionally exclusive.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    9091 
/*
Marks as lost the allocations that CreateAllocationRequest counted in
pAllocationRequest->itemsToMakeLostCount. Scans the live head of the 1st
vector in order and makes non-free items lost until the requested count is
reached. Returns false (leaving already-lost items lost) if any item refuses
MakeLost(); returns true on success.
*/
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Losing items is only meaningful for the ring-buffer wrap-around path.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        // The request guaranteed enough candidates exist.
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the item into a free hole and update bookkeeping.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    9136 
    9137 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9138 {
    9139  uint32_t lostAllocationCount = 0;
    9140 
    9141  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9142  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9143  {
    9144  VmaSuballocation& suballoc = suballocations1st[i];
    9145  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9146  suballoc.hAllocation->CanBecomeLost() &&
    9147  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9148  {
    9149  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9150  suballoc.hAllocation = VK_NULL_HANDLE;
    9151  ++m_1stNullItemsMiddleCount;
    9152  m_SumFreeSize += suballoc.size;
    9153  ++lostAllocationCount;
    9154  }
    9155  }
    9156 
    9157  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9158  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9159  {
    9160  VmaSuballocation& suballoc = suballocations2nd[i];
    9161  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9162  suballoc.hAllocation->CanBecomeLost() &&
    9163  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9164  {
    9165  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9166  suballoc.hAllocation = VK_NULL_HANDLE;
    9167  ++m_2ndNullItemsCount;
    9168  ++lostAllocationCount;
    9169  }
    9170  }
    9171 
    9172  if(lostAllocationCount)
    9173  {
    9174  CleanupAfterFree();
    9175  }
    9176 
    9177  return lostAllocationCount;
    9178 }
    9179 
    9180 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9181 {
    9182  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9183  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9184  {
    9185  const VmaSuballocation& suballoc = suballocations1st[i];
    9186  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9187  {
    9188  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9189  {
    9190  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9191  return VK_ERROR_VALIDATION_FAILED_EXT;
    9192  }
    9193  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9194  {
    9195  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9196  return VK_ERROR_VALIDATION_FAILED_EXT;
    9197  }
    9198  }
    9199  }
    9200 
    9201  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9202  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9203  {
    9204  const VmaSuballocation& suballoc = suballocations2nd[i];
    9205  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9206  {
    9207  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9208  {
    9209  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9210  return VK_ERROR_VALIDATION_FAILED_EXT;
    9211  }
    9212  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9213  {
    9214  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9215  return VK_ERROR_VALIDATION_FAILED_EXT;
    9216  }
    9217  }
    9218  }
    9219 
    9220  return VK_SUCCESS;
    9221 }
    9222 
/*
Commits an allocation previously planned by CreateAllocationRequest.
Appends the new suballocation to the appropriate vector and updates the
2nd-vector mode (EMPTY -> DOUBLE_STACK or RING_BUFFER) when the placement
implies it. Decrements m_SumFreeSize by the allocated size.
*/
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper stack: the 2nd vector holds the down-growing stack.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // Wrapped placement switches (or confirms) ring-buffer mode.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Request does not match any valid placement - planner and committer disagree.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9292 
    9293 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9294 {
    9295  FreeAtOffset(allocation->GetOffset());
    9296 }
    9297 
/*
Frees the suballocation that starts at the given offset. Tries the cheap
cases first (first live item of the 1st vector, last item of the active
vector), then falls back to binary search in the middle of each vector.
Calls CleanupAfterFree() on every successful path; asserts if the offset is
not found.
*/
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 1st vector is sorted by offset ascending, so binary search applies.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as a free hole; CleanupAfterFree compacts when worthwhile.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps 2nd vector sorted ascending; double stack descending -
        // pick the matching comparator.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9386 
    9387 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9388 {
    9389  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9390  const size_t suballocCount = AccessSuballocations1st().size();
    9391  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9392 }
    9393 
// Housekeeping run after every free in the linear (ring-buffer) metadata:
// trims null (freed) items from the edges of both suballocation vectors,
// optionally compacts the 1st vector, and when the 1st vector drains while
// the 2nd is in ring-buffer mode, promotes the 2nd vector to become the 1st.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations at all - reset everything to the pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // (They migrate from "middle" to "begin" accounting.)
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide non-null items left over the null ones, then shrink.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // Note: counters are recomputed against suballocations2nd here
                // because after m_1stVectorIndex ^= 1 below, the current 2nd
                // vector becomes the 1st one.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9490 
    9491 
////////////////////////////////////////////////////////////////////////////////
// class VmaBlockMetadata_Buddy
    9494 
// Constructs empty buddy metadata. m_FreeCount starts at 1 because after
// Init() the whole block is represented by a single free root node; the tree
// itself is built in Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // All per-level free lists start empty (zeroed front/back pointers).
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9504 
// Destroys the whole node tree recursively.
// NOTE(review): DeleteNode dereferences its argument, so this assumes Init()
// was called and m_Root is non-null - confirm no code path destroys an
// uninitialized instance.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9509 
    9510 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9511 {
    9512  VmaBlockMetadata::Init(size);
    9513 
    9514  m_UsableSize = VmaPrevPow2(size);
    9515  m_SumFreeSize = m_UsableSize;
    9516 
    9517  // Calculate m_LevelCount.
    9518  m_LevelCount = 1;
    9519  while(m_LevelCount < MAX_LEVELS &&
    9520  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9521  {
    9522  ++m_LevelCount;
    9523  }
    9524 
    9525  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9526  rootNode->offset = 0;
    9527  rootNode->type = Node::TYPE_FREE;
    9528  rootNode->parent = VMA_NULL;
    9529  rootNode->buddy = VMA_NULL;
    9530 
    9531  m_Root = rootNode;
    9532  AddToFreeListFront(0, rootNode);
    9533 }
    9534 
// Validates the whole buddy metadata: tree invariants (via ValidateNode),
// aggregate counters, and the per-level doubly-linked free lists.
// Returns false through VMA_VALIDATE on the first violated invariant.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Cross-check counters accumulated during the tree walk.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // Front node, if any, must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Last node must be recorded as the list's back.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // prev/next links must be mutually consistent.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9577 
    9578 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9579 {
    9580  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9581  {
    9582  if(m_FreeList[level].front != VMA_NULL)
    9583  {
    9584  return LevelToNodeSize(level);
    9585  }
    9586  }
    9587  return 0;
    9588 }
    9589 
    9590 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9591 {
    9592  const VkDeviceSize unusableSize = GetUnusableSize();
    9593 
    9594  outInfo.blockCount = 1;
    9595 
    9596  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9597  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9598 
    9599  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9600  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9601  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9602 
    9603  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9604 
    9605  if(unusableSize > 0)
    9606  {
    9607  ++outInfo.unusedRangeCount;
    9608  outInfo.unusedBytes += unusableSize;
    9609  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9610  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9611  }
    9612 }
    9613 
    9614 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9615 {
    9616  const VkDeviceSize unusableSize = GetUnusableSize();
    9617 
    9618  inoutStats.size += GetSize();
    9619  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9620  inoutStats.allocationCount += m_AllocationCount;
    9621  inoutStats.unusedRangeCount += m_FreeCount;
    9622  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9623 
    9624  if(unusableSize > 0)
    9625  {
    9626  ++inoutStats.unusedRangeCount;
    9627  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9628  }
    9629 }
    9630 
    9631 #if VMA_STATS_STRING_ENABLED
    9632 
    9633 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    9634 {
    9635  // TODO optimize
    9636  VmaStatInfo stat;
    9637  CalcAllocationStatInfo(stat);
    9638 
    9639  PrintDetailedMap_Begin(
    9640  json,
    9641  stat.unusedBytes,
    9642  stat.allocationCount,
    9643  stat.unusedRangeCount);
    9644 
    9645  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    9646 
    9647  const VkDeviceSize unusableSize = GetUnusableSize();
    9648  if(unusableSize > 0)
    9649  {
    9650  PrintDetailedMap_UnusedRange(json,
    9651  m_UsableSize, // offset
    9652  unusableSize); // size
    9653  }
    9654 
    9655  PrintDetailedMap_End(json);
    9656 }
    9657 
    9658 #endif // #if VMA_STATS_STRING_ENABLED
    9659 
// Searches the free lists for a node that can hold allocSize with the given
// alignment. On success fills *pAllocationRequest and returns true; the level
// at which the node was found is passed to Alloc() through customData.
// The lost-allocation parameters (currentFrameIndex, frameInUseCount,
// canMakeOtherLost, strategy) are unused - the buddy algorithm does not
// support making allocations lost.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Scan from targetLevel (deepest level whose nodes still fit allocSize)
    // up to level 0; `level--` in the condition makes the loop visit
    // level = targetLevel, targetLevel-1, ..., 0, i.e. best-fit first.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Alloc() needs to know at which level the node was found.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9710 
    9711 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9712  uint32_t currentFrameIndex,
    9713  uint32_t frameInUseCount,
    9714  VmaAllocationRequest* pAllocationRequest)
    9715 {
    9716  /*
    9717  Lost allocations are not supported in buddy allocator at the moment.
    9718  Support might be added in the future.
    9719  */
    9720  return pAllocationRequest->itemsToMakeLostCount == 0;
    9721 }
    9722 
    9723 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9724 {
    9725  /*
    9726  Lost allocations are not supported in buddy allocator at the moment.
    9727  Support might be added in the future.
    9728  */
    9729  return 0;
    9730 }
    9731 
// Commits a request produced by CreateAllocationRequest: finds the free node
// at the level carried in request.customData, splits it repeatedly until the
// target level is reached, then converts the resulting node into an
// allocation node holding hAllocation. `type` and `upperAddress` are unused
// here (upper-address is rejected earlier; suballocation type only matters
// for the granularity adjustment done in CreateAllocationRequest).
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node with the requested offset in currLevel's list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // The left child must end up at the front so the descent below can
        // continue through m_FreeList[currLevel].front.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9806 
    9807 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9808 {
    9809  if(node->type == Node::TYPE_SPLIT)
    9810  {
    9811  DeleteNode(node->split.leftChild->buddy);
    9812  DeleteNode(node->split.leftChild);
    9813  }
    9814 
    9815  vma_delete(GetAllocationCallbacks(), node);
    9816 }
    9817 
    9818 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9819 {
    9820  VMA_VALIDATE(level < m_LevelCount);
    9821  VMA_VALIDATE(curr->parent == parent);
    9822  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9823  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9824  switch(curr->type)
    9825  {
    9826  case Node::TYPE_FREE:
    9827  // curr->free.prev, next are validated separately.
    9828  ctx.calculatedSumFreeSize += levelNodeSize;
    9829  ++ctx.calculatedFreeCount;
    9830  break;
    9831  case Node::TYPE_ALLOCATION:
    9832  ++ctx.calculatedAllocationCount;
    9833  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9834  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9835  break;
    9836  case Node::TYPE_SPLIT:
    9837  {
    9838  const uint32_t childrenLevel = level + 1;
    9839  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9840  const Node* const leftChild = curr->split.leftChild;
    9841  VMA_VALIDATE(leftChild != VMA_NULL);
    9842  VMA_VALIDATE(leftChild->offset == curr->offset);
    9843  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9844  {
    9845  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9846  }
    9847  const Node* const rightChild = leftChild->buddy;
    9848  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9849  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9850  {
    9851  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9852  }
    9853  }
    9854  break;
    9855  default:
    9856  return false;
    9857  }
    9858 
    9859  return true;
    9860 }
    9861 
    9862 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9863 {
    9864  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9865  uint32_t level = 0;
    9866  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9867  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9868  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9869  {
    9870  ++level;
    9871  currLevelNodeSize = nextLevelNodeSize;
    9872  nextLevelNodeSize = currLevelNodeSize >> 1;
    9873  }
    9874  return level;
    9875 }
    9876 
// Frees the allocation at `offset`: descends from the root following the
// offset to the owning allocation node, turns it back into a free node, then
// repeatedly merges it with its buddy while the buddy is also free.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Offset falls into the left half.
            node = node->split.leftChild;
        }
        else
        {
            // Right half: the right child is the left child's buddy.
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above tolerates alloc == VK_NULL_HANDLE, yet
    // GetSize() is called on it unconditionally here - confirm callers never
    // actually pass a null handle.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9927 
    9928 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9929 {
    9930  switch(node->type)
    9931  {
    9932  case Node::TYPE_FREE:
    9933  ++outInfo.unusedRangeCount;
    9934  outInfo.unusedBytes += levelNodeSize;
    9935  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9936  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9937  break;
    9938  case Node::TYPE_ALLOCATION:
    9939  {
    9940  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9941  ++outInfo.allocationCount;
    9942  outInfo.usedBytes += allocSize;
    9943  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9944  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9945 
    9946  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9947  if(unusedRangeSize > 0)
    9948  {
    9949  ++outInfo.unusedRangeCount;
    9950  outInfo.unusedBytes += unusedRangeSize;
    9951  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9952  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9953  }
    9954  }
    9955  break;
    9956  case Node::TYPE_SPLIT:
    9957  {
    9958  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9959  const Node* const leftChild = node->split.leftChild;
    9960  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9961  const Node* const rightChild = leftChild->buddy;
    9962  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9963  }
    9964  break;
    9965  default:
    9966  VMA_ASSERT(0);
    9967  }
    9968 }
    9969 
    9970 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9971 {
    9972  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9973 
    9974  // List is empty.
    9975  Node* const frontNode = m_FreeList[level].front;
    9976  if(frontNode == VMA_NULL)
    9977  {
    9978  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9979  node->free.prev = node->free.next = VMA_NULL;
    9980  m_FreeList[level].front = m_FreeList[level].back = node;
    9981  }
    9982  else
    9983  {
    9984  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9985  node->free.prev = VMA_NULL;
    9986  node->free.next = frontNode;
    9987  frontNode->free.prev = node;
    9988  m_FreeList[level].front = node;
    9989  }
    9990 }
    9991 
    9992 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9993 {
    9994  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9995 
    9996  // It is at the front.
    9997  if(node->free.prev == VMA_NULL)
    9998  {
    9999  VMA_ASSERT(m_FreeList[level].front == node);
    10000  m_FreeList[level].front = node->free.next;
    10001  }
    10002  else
    10003  {
    10004  Node* const prevFreeNode = node->free.prev;
    10005  VMA_ASSERT(prevFreeNode->free.next == node);
    10006  prevFreeNode->free.next = node->free.next;
    10007  }
    10008 
    10009  // It is at the back.
    10010  if(node->free.next == VMA_NULL)
    10011  {
    10012  VMA_ASSERT(m_FreeList[level].back == node);
    10013  m_FreeList[level].back = node->free.prev;
    10014  }
    10015  else
    10016  {
    10017  Node* const nextFreeNode = node->free.next;
    10018  VMA_ASSERT(nextFreeNode->free.prev == node);
    10019  nextFreeNode->free.prev = node->free.prev;
    10020  }
    10021 }
    10022 
    10023 #if VMA_STATS_STRING_ENABLED
    10024 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10025 {
    10026  switch(node->type)
    10027  {
    10028  case Node::TYPE_FREE:
    10029  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10030  break;
    10031  case Node::TYPE_ALLOCATION:
    10032  {
    10033  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10034  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10035  if(allocSize < levelNodeSize)
    10036  {
    10037  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10038  }
    10039  }
    10040  break;
    10041  case Node::TYPE_SPLIT:
    10042  {
    10043  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10044  const Node* const leftChild = node->split.leftChild;
    10045  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10046  const Node* const rightChild = leftChild->buddy;
    10047  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10048  }
    10049  break;
    10050  default:
    10051  VMA_ASSERT(0);
    10052  }
    10053 }
    10054 #endif // #if VMA_STATS_STRING_ENABLED
    10055 
    10056 
////////////////////////////////////////////////////////////////////////////////
// class VmaDeviceMemoryBlock
    10059 
// Constructs an uninitialized block; all real setup happens in Init().
// hAllocator is accepted for interface symmetry but not stored here.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    10069 
    10070 void VmaDeviceMemoryBlock::Init(
    10071  VmaAllocator hAllocator,
    10072  uint32_t newMemoryTypeIndex,
    10073  VkDeviceMemory newMemory,
    10074  VkDeviceSize newSize,
    10075  uint32_t id,
    10076  uint32_t algorithm)
    10077 {
    10078  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10079 
    10080  m_MemoryTypeIndex = newMemoryTypeIndex;
    10081  m_Id = id;
    10082  m_hMemory = newMemory;
    10083 
    10084  switch(algorithm)
    10085  {
    10087  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10088  break;
    10090  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10091  break;
    10092  default:
    10093  VMA_ASSERT(0);
    10094  // Fall-through.
    10095  case 0:
    10096  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10097  }
    10098  m_pMetadata->Init(newSize);
    10099 }
    10100 
// Returns the block's VkDeviceMemory to the allocator and destroys its
// metadata. The block must contain no live allocations.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    10114 
    10115 bool VmaDeviceMemoryBlock::Validate() const
    10116 {
    10117  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10118  (m_pMetadata->GetSize() != 0));
    10119 
    10120  return m_pMetadata->Validate();
    10121 }
    10122 
    10123 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10124 {
    10125  void* pData = nullptr;
    10126  VkResult res = Map(hAllocator, 1, &pData);
    10127  if(res != VK_SUCCESS)
    10128  {
    10129  return res;
    10130  }
    10131 
    10132  res = m_pMetadata->CheckCorruption(pData);
    10133 
    10134  Unmap(hAllocator, 1);
    10135 
    10136  return res;
    10137 }
    10138 
// Maps the block's VkDeviceMemory into host address space with reference
// counting: `count` references are added, and vkMapMemory is called only on
// the 0 -> nonzero transition. Guarded by m_Mutex. ppData may be null if the
// caller doesn't need the pointer back.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped - just bump the reference count and hand out
        // the cached pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First reference - map the whole block.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
    10177 
// Releases `count` map references added by Map(); vkUnmapMemory is called
// only when the count drops to zero. An unbalanced unmap asserts and is
// otherwise ignored. Guarded by m_Mutex.
void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
{
    if(count == 0)
    {
        return;
    }

    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount >= count)
    {
        m_MapCount -= count;
        if(m_MapCount == 0)
        {
            // Last reference gone - actually unmap the memory.
            m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    }
}
    10200 
    10201 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10202 {
    10203  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10204  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10205 
    10206  void* pData;
    10207  VkResult res = Map(hAllocator, 1, &pData);
    10208  if(res != VK_SUCCESS)
    10209  {
    10210  return res;
    10211  }
    10212 
    10213  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10214  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10215 
    10216  Unmap(hAllocator, 1);
    10217 
    10218  return VK_SUCCESS;
    10219 }
    10220 
    10221 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10222 {
    10223  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10224  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10225 
    10226  void* pData;
    10227  VkResult res = Map(hAllocator, 1, &pData);
    10228  if(res != VK_SUCCESS)
    10229  {
    10230  return res;
    10231  }
    10232 
    10233  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10234  {
    10235  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10236  }
    10237  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10238  {
    10239  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10240  }
    10241 
    10242  Unmap(hAllocator, 1);
    10243 
    10244  return VK_SUCCESS;
    10245 }
    10246 
    10247 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10248  const VmaAllocator hAllocator,
    10249  const VmaAllocation hAllocation,
    10250  VkBuffer hBuffer)
    10251 {
    10252  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10253  hAllocation->GetBlock() == this);
    10254  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10255  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10256  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10257  hAllocator->m_hDevice,
    10258  hBuffer,
    10259  m_hMemory,
    10260  hAllocation->GetOffset());
    10261 }
    10262 
    10263 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10264  const VmaAllocator hAllocator,
    10265  const VmaAllocation hAllocation,
    10266  VkImage hImage)
    10267 {
    10268  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10269  hAllocation->GetBlock() == this);
    10270  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10271  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10272  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10273  hAllocator->m_hDevice,
    10274  hImage,
    10275  m_hMemory,
    10276  hAllocation->GetOffset());
    10277 }
    10278 
    10279 static void InitStatInfo(VmaStatInfo& outInfo)
    10280 {
    10281  memset(&outInfo, 0, sizeof(outInfo));
    10282  outInfo.allocationSizeMin = UINT64_MAX;
    10283  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10284 }
    10285 
    10286 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10287 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10288 {
    10289  inoutInfo.blockCount += srcInfo.blockCount;
    10290  inoutInfo.allocationCount += srcInfo.allocationCount;
    10291  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10292  inoutInfo.usedBytes += srcInfo.usedBytes;
    10293  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10294  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10295  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10296  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10297  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10298 }
    10299 
    10300 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10301 {
    10302  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10303  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10304  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10305  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10306 }
    10307 
// Constructs a custom pool. All real state lives in the embedded block
// vector; when the user did not request an explicit blockSize (== 0), the
// allocator-suggested preferredBlockSize is used instead.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10326 
// Intentionally empty: the embedded m_BlockVector's destructor releases
// all device memory blocks owned by this pool.
VmaPool_T::~VmaPool_T()
{
}
    10330 
    10331 #if VMA_STATS_STRING_ENABLED
    10332 
    10333 #endif // #if VMA_STATS_STRING_ENABLED
    10334 
// Stores configuration only - no Vulkan calls happen here. Device memory
// blocks are created later, on demand, via CreateMinBlocks()/CreateBlock().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    // Block vector storage itself goes through the user-provided allocation callbacks.
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10362 
    10363 VmaBlockVector::~VmaBlockVector()
    10364 {
    10365  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10366 
    10367  for(size_t i = m_Blocks.size(); i--; )
    10368  {
    10369  m_Blocks[i]->Destroy(m_hAllocator);
    10370  vma_delete(m_hAllocator, m_Blocks[i]);
    10371  }
    10372 }
    10373 
    10374 VkResult VmaBlockVector::CreateMinBlocks()
    10375 {
    10376  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10377  {
    10378  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10379  if(res != VK_SUCCESS)
    10380  {
    10381  return res;
    10382  }
    10383  }
    10384  return VK_SUCCESS;
    10385 }
    10386 
    10387 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10388 {
    10389  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10390 
    10391  const size_t blockCount = m_Blocks.size();
    10392 
    10393  pStats->size = 0;
    10394  pStats->unusedSize = 0;
    10395  pStats->allocationCount = 0;
    10396  pStats->unusedRangeCount = 0;
    10397  pStats->unusedRangeSizeMax = 0;
    10398  pStats->blockCount = blockCount;
    10399 
    10400  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10401  {
    10402  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10403  VMA_ASSERT(pBlock);
    10404  VMA_HEAVY_ASSERT(pBlock->Validate());
    10405  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10406  }
    10407 }
    10408 
    10409 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10410 {
    10411  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10412  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10413  (VMA_DEBUG_MARGIN > 0) &&
    10414  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10415 }
    10416 
// Upper bound on retries in VmaBlockVector::Allocate when concurrently
// touched allocations keep defeating our attempt to make them lost.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10418 
// Allocates `size` bytes with `alignment` from this block vector, in three
// stages: (1) try existing blocks without disturbing anything, (2) create a
// new block if allowed, (3) optionally make other (lost-able) allocations
// lost to free up room. On success fills *pAllocation and returns VK_SUCCESS.
//
// NOTE(review): several interior lines of this function appear to have been
// elided from this source chunk (missing strategy case labels and missing
// `if(...)` condition lines are marked below) - verify against the upstream
// file before building.
VkResult VmaBlockVector::Allocate(
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    // Decode the relevant creation flags up front.
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    // Which in turn is available only when maxBlockCount = 1.
    if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    {
        canMakeOtherLost = false;
    }

    // Upper address can only be used with linear allocator and within single memory block.
    if(isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Validate strategy.
    // NOTE(review): case labels for the non-zero strategy bits look elided
    // from this chunk; as written, only strategy == 0 is accepted.
    switch(strategy)
    {
    case 0:
        break;
        break;
    default:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger that maximum block size for this block vector.
    if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    /*
    Under certain condition, this whole section can be skipped for optimization, so
    we move on directly to trying to allocate with canMakeOtherLost. That's the case
    e.g. for custom pools with linear algorithm.
    */
    if(!canMakeOtherLost || canCreateNewBlock)
    {
        // 1. Search existing allocations. Try to allocate without making other allocations lost.
        VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
        // NOTE(review): a line adjusting allocFlagsCopy appears elided here.

        if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
        {
            // Use only last block.
            if(!m_Blocks.empty())
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(
                    pCurrBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
                    return VK_SUCCESS;
                }
            }
        }
        else
        {
            // NOTE(review): the `if(strategy == ...)` condition guarding this
            // forward-scan branch appears elided from this chunk.
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
        }

        // 2. Try to create new block.
        if(canCreateNewBlock)
        {
            // Calculate optimal size for new block.
            VkDeviceSize newBlockSize = m_PreferredBlockSize;
            uint32_t newBlockSizeShift = 0;
            const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

            if(!m_ExplicitBlockSize)
            {
                // Allocate 1/8, 1/4, 1/2 as first blocks.
                // Halve the candidate size while it still exceeds every existing
                // block and leaves at least 2x headroom over the request.
                const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
                for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                    }
                    else
                    {
                        break;
                    }
                }
            }

            size_t newBlockIndex = 0;
            VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
            // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
            if(!m_ExplicitBlockSize)
            {
                while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize >= size)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                        res = CreateBlock(newBlockSize, &newBlockIndex);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            if(res == VK_SUCCESS)
            {
                VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
                VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

                res = AllocateFromBlock(
                    pBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
                    return VK_SUCCESS;
                }
                else
                {
                    // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
            }
        }
    }

    // 3. Try to allocate from existing blocks with making other allocations lost.
    if(canMakeOtherLost)
    {
        uint32_t tryIndex = 0;
        for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
        {
            // Find the cheapest request across all blocks (cost = bytes of
            // other allocations that would have to be made lost).
            VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
            VmaAllocationRequest bestRequest = {};
            VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;

            // 1. Search existing allocations.
            // NOTE(review): the `if(strategy == ...)` condition guarding this
            // forward-scan branch appears elided from this chunk.
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost)
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            // Cost 0 cannot be beaten - stop searching.
                            if(bestRequestCost == 0)
                            {
                                break;
                            }
                        }
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost ||
                        // NOTE(review): trailing `||` - the rest of this
                        // condition appears elided from this chunk.
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            if(bestRequestCost == 0 ||
                            // NOTE(review): trailing `||` - the rest of this
                            // condition appears elided from this chunk.
                            {
                                break;
                            }
                        }
                    }
                }
            }

            if(pBestRequestBlock != VMA_NULL)
            {
                // Map the winning block first if the allocation must be mapped.
                if(mapped)
                {
                    VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
                    if(res != VK_SUCCESS)
                    {
                        return res;
                    }
                }

                // Actually make the conflicting allocations lost; this can fail
                // if other threads touched them meanwhile, in which case we retry.
                if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
                    currentFrameIndex,
                    m_FrameInUseCount,
                    &bestRequest))
                {
                    // We no longer have an empty Allocation.
                    if(pBestRequestBlock->m_pMetadata->IsEmpty())
                    {
                        m_HasEmptyBlock = false;
                    }
                    // Allocate from this pBlock.
                    *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
                    pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
                    (*pAllocation)->InitBlockAllocation(
                        hCurrentPool,
                        pBestRequestBlock,
                        bestRequest.offset,
                        alignment,
                        size,
                        suballocType,
                        mapped,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
                    VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
                    VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
                    (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
                    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                    {
                        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
                    }
                    if(IsCorruptionDetectionEnabled())
                    {
                        VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
                        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
                    }
                    return VK_SUCCESS;
                }
                // else: Some allocations must have been touched while we are here. Next try.
            }
            else
            {
                // Could not find place in any of the blocks - break outer loop.
                break;
            }
        }
        /* Maximum number of tries exceeded - a very unlike event when many other
        threads are simultaneously touching allocations making it impossible to make
        lost at the same time as we try to allocate. */
        if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
        {
            return VK_ERROR_TOO_MANY_OBJECTS;
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10793 
    10794 void VmaBlockVector::Free(
    10795  VmaAllocation hAllocation)
    10796 {
    10797  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10798 
    10799  // Scope for lock.
    10800  {
    10801  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10802 
    10803  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10804 
    10805  if(IsCorruptionDetectionEnabled())
    10806  {
    10807  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10808  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10809  }
    10810 
    10811  if(hAllocation->IsPersistentMap())
    10812  {
    10813  pBlock->Unmap(m_hAllocator, 1);
    10814  }
    10815 
    10816  pBlock->m_pMetadata->Free(hAllocation);
    10817  VMA_HEAVY_ASSERT(pBlock->Validate());
    10818 
    10819  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10820 
    10821  // pBlock became empty after this deallocation.
    10822  if(pBlock->m_pMetadata->IsEmpty())
    10823  {
    10824  // Already has empty Allocation. We don't want to have two, so delete this one.
    10825  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10826  {
    10827  pBlockToDelete = pBlock;
    10828  Remove(pBlock);
    10829  }
    10830  // We now have first empty block.
    10831  else
    10832  {
    10833  m_HasEmptyBlock = true;
    10834  }
    10835  }
    10836  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10837  // (This is optional, heuristics.)
    10838  else if(m_HasEmptyBlock)
    10839  {
    10840  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10841  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10842  {
    10843  pBlockToDelete = pLastBlock;
    10844  m_Blocks.pop_back();
    10845  m_HasEmptyBlock = false;
    10846  }
    10847  }
    10848 
    10849  IncrementallySortBlocks();
    10850  }
    10851 
    10852  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10853  // lock, for performance reason.
    10854  if(pBlockToDelete != VMA_NULL)
    10855  {
    10856  VMA_DEBUG_LOG(" Deleted empty allocation");
    10857  pBlockToDelete->Destroy(m_hAllocator);
    10858  vma_delete(m_hAllocator, pBlockToDelete);
    10859  }
    10860 }
    10861 
    10862 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10863 {
    10864  VkDeviceSize result = 0;
    10865  for(size_t i = m_Blocks.size(); i--; )
    10866  {
    10867  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10868  if(result >= m_PreferredBlockSize)
    10869  {
    10870  break;
    10871  }
    10872  }
    10873  return result;
    10874 }
    10875 
    10876 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10877 {
    10878  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10879  {
    10880  if(m_Blocks[blockIndex] == pBlock)
    10881  {
    10882  VmaVectorRemove(m_Blocks, blockIndex);
    10883  return;
    10884  }
    10885  }
    10886  VMA_ASSERT(0);
    10887 }
    10888 
    10889 void VmaBlockVector::IncrementallySortBlocks()
    10890 {
    10891  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10892  {
    10893  // Bubble sort only until first swap.
    10894  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10895  {
    10896  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10897  {
    10898  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10899  return;
    10900  }
    10901  }
    10902  }
    10903 }
    10904 
    10905 VkResult VmaBlockVector::AllocateFromBlock(
    10906  VmaDeviceMemoryBlock* pBlock,
    10907  VmaPool hCurrentPool,
    10908  uint32_t currentFrameIndex,
    10909  VkDeviceSize size,
    10910  VkDeviceSize alignment,
    10911  VmaAllocationCreateFlags allocFlags,
    10912  void* pUserData,
    10913  VmaSuballocationType suballocType,
    10914  uint32_t strategy,
    10915  VmaAllocation* pAllocation)
    10916 {
    10917  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    10918  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10919  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10920  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10921 
    10922  VmaAllocationRequest currRequest = {};
    10923  if(pBlock->m_pMetadata->CreateAllocationRequest(
    10924  currentFrameIndex,
    10925  m_FrameInUseCount,
    10926  m_BufferImageGranularity,
    10927  size,
    10928  alignment,
    10929  isUpperAddress,
    10930  suballocType,
    10931  false, // canMakeOtherLost
    10932  strategy,
    10933  &currRequest))
    10934  {
    10935  // Allocate from pCurrBlock.
    10936  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    10937 
    10938  if(mapped)
    10939  {
    10940  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    10941  if(res != VK_SUCCESS)
    10942  {
    10943  return res;
    10944  }
    10945  }
    10946 
    10947  // We no longer have an empty Allocation.
    10948  if(pBlock->m_pMetadata->IsEmpty())
    10949  {
    10950  m_HasEmptyBlock = false;
    10951  }
    10952 
    10953  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10954  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
    10955  (*pAllocation)->InitBlockAllocation(
    10956  hCurrentPool,
    10957  pBlock,
    10958  currRequest.offset,
    10959  alignment,
    10960  size,
    10961  suballocType,
    10962  mapped,
    10963  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10964  VMA_HEAVY_ASSERT(pBlock->Validate());
    10965  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    10966  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10967  {
    10968  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10969  }
    10970  if(IsCorruptionDetectionEnabled())
    10971  {
    10972  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    10973  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10974  }
    10975  return VK_SUCCESS;
    10976  }
    10977  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10978 }
    10979 
    10980 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10981 {
    10982  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10983  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10984  allocInfo.allocationSize = blockSize;
    10985  VkDeviceMemory mem = VK_NULL_HANDLE;
    10986  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10987  if(res < 0)
    10988  {
    10989  return res;
    10990  }
    10991 
    10992  // New VkDeviceMemory successfully created.
    10993 
    10994  // Create new Allocation for it.
    10995  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10996  pBlock->Init(
    10997  m_hAllocator,
    10998  m_MemoryTypeIndex,
    10999  mem,
    11000  allocInfo.allocationSize,
    11001  m_NextBlockId++,
    11002  m_Algorithm);
    11003 
    11004  m_Blocks.push_back(pBlock);
    11005  if(pNewBlockIndex != VMA_NULL)
    11006  {
    11007  *pNewBlockIndex = m_Blocks.size() - 1;
    11008  }
    11009 
    11010  return VK_SUCCESS;
    11011 }
    11012 
    11013 #if VMA_STATS_STRING_ENABLED
    11014 
// Emits this block vector's state as a JSON object. Custom pools print their
// exact configuration (memory type, block size, min/max/current block count,
// frame-in-use count, algorithm); default vectors print only the preferred
// block size. Then each block's detailed map follows, keyed by block id.
// Takes the vector's mutex for the duration of the dump.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // Min/Max appear only when they actually constrain the pool.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // One entry per block, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    11077 
    11078 #endif // #if VMA_STATS_STRING_ENABLED
    11079 
    11080 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11081  VmaAllocator hAllocator,
    11082  uint32_t currentFrameIndex)
    11083 {
    11084  if(m_pDefragmentator == VMA_NULL)
    11085  {
    11086  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11087  hAllocator,
    11088  this,
    11089  currentFrameIndex);
    11090  }
    11091 
    11092  return m_pDefragmentator;
    11093 }
    11094 
// Runs one defragmentation pass using the previously created defragmentator
// (see EnsureDefragmentator); no-op returning VK_SUCCESS when none exists.
// On return, maxBytesToMove and maxAllocationsToMove are reduced by the work
// actually performed, optional *pDefragmentationStats is accumulated (not
// reset), and blocks left empty are freed down to m_MinBlockCount.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must have respected the budget it was given.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks (keeping at least m_MinBlockCount). Iterates backward
    // so VmaVectorRemove does not shift indices that are still to be visited.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Cannot drop below the minimum - just remember one is empty.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11151 
    11152 void VmaBlockVector::DestroyDefragmentator()
    11153 {
    11154  if(m_pDefragmentator != VMA_NULL)
    11155  {
    11156  vma_delete(m_hAllocator, m_pDefragmentator);
    11157  m_pDefragmentator = VMA_NULL;
    11158  }
    11159 }
    11160 
    11161 void VmaBlockVector::MakePoolAllocationsLost(
    11162  uint32_t currentFrameIndex,
    11163  size_t* pLostAllocationCount)
    11164 {
    11165  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11166  size_t lostAllocationCount = 0;
    11167  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11168  {
    11169  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11170  VMA_ASSERT(pBlock);
    11171  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11172  }
    11173  if(pLostAllocationCount != VMA_NULL)
    11174  {
    11175  *pLostAllocationCount = lostAllocationCount;
    11176  }
    11177 }
    11178 
    11179 VkResult VmaBlockVector::CheckCorruption()
    11180 {
    11181  if(!IsCorruptionDetectionEnabled())
    11182  {
    11183  return VK_ERROR_FEATURE_NOT_PRESENT;
    11184  }
    11185 
    11186  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11187  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11188  {
    11189  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11190  VMA_ASSERT(pBlock);
    11191  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11192  if(res != VK_SUCCESS)
    11193  {
    11194  return res;
    11195  }
    11196  }
    11197  return VK_SUCCESS;
    11198 }
    11199 
    11200 void VmaBlockVector::AddStats(VmaStats* pStats)
    11201 {
    11202  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11203  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11204 
    11205  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11206 
    11207  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11208  {
    11209  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11210  VMA_ASSERT(pBlock);
    11211  VMA_HEAVY_ASSERT(pBlock->Validate());
    11212  VmaStatInfo allocationStatInfo;
    11213  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11214  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11215  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11216  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11217  }
    11218 }
    11219 
    11221 // VmaDefragmentator members definition
    11222 
// Constructs a defragmentator bound to a single block vector.
// Counters start at zero; allocation/block lists use the allocator's callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation supports only the default (generic) metadata algorithm.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11237 
    11238 VmaDefragmentator::~VmaDefragmentator()
    11239 {
    11240  for(size_t i = m_Blocks.size(); i--; )
    11241  {
    11242  vma_delete(m_hAllocator, m_Blocks[i]);
    11243  }
    11244 }
    11245 
    11246 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11247 {
    11248  AllocationInfo allocInfo;
    11249  allocInfo.m_hAllocation = hAlloc;
    11250  allocInfo.m_pChanged = pChanged;
    11251  m_Allocations.push_back(allocInfo);
    11252 }
    11253 
    11254 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11255 {
    11256  // It has already been mapped for defragmentation.
    11257  if(m_pMappedDataForDefragmentation)
    11258  {
    11259  *ppMappedData = m_pMappedDataForDefragmentation;
    11260  return VK_SUCCESS;
    11261  }
    11262 
    11263  // It is originally mapped.
    11264  if(m_pBlock->GetMappedData())
    11265  {
    11266  *ppMappedData = m_pBlock->GetMappedData();
    11267  return VK_SUCCESS;
    11268  }
    11269 
    11270  // Map on first usage.
    11271  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11272  *ppMappedData = m_pMappedDataForDefragmentation;
    11273  return res;
    11274 }
    11275 
    11276 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11277 {
    11278  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11279  {
    11280  m_pBlock->Unmap(hAllocator, 1);
    11281  }
    11282 }
    11283 
// Performs one pass of defragmentation: repeatedly picks allocations from the
// most "source" blocks (end of m_Blocks) and tries to relocate them into the
// most "destination" blocks (front of m_Blocks), copying data through mapped
// pointers. Returns VK_SUCCESS when all candidates were processed,
// VK_INCOMPLETE when maxBytesToMove or maxAllocationsToMove was reached,
// or an error from mapping.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX; // SIZE_MAX acts as "not selected yet" sentinel.
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                // Resume at the last (smallest) remaining allocation of this block.
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            // NOTE(review): there appears to be an extraction gap here - some
            // versions of CreateAllocationRequest take an extra 'strategy'
            // argument between canMakeOtherLost and the request pointer.
            // Verify this argument list against VmaBlockMetadata's declaration.
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                // canMakeOtherLost was false, so nothing may be scheduled to be lost.
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Make sure both blocks are mapped before copying.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Re-create the corruption-detection margins around the new location.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit: register in destination metadata, free from source,
                // and repoint the allocation handle to its new block/offset.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the next candidate (previous index, or previous block).
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11427 
// Top-level defragmentation driver: builds per-block bookkeeping, distributes
// the registered allocations to their owning blocks, sorts blocks from most
// "destination" to most "source", then runs up to two DefragmentRound passes.
// Must be called with m_pBlockVector's mutex held by the caller (it reads
// m_pBlockVector->m_Blocks directly) - TODO confirm against call site.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    // NOTE: despite its name, 'blockIndex' here indexes m_Allocations, not blocks.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            // Binary search works because m_Blocks was just sorted by pointer.
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to a block of this vector.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11495 
    11496 bool VmaDefragmentator::MoveMakesSense(
    11497  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11498  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11499 {
    11500  if(dstBlockIndex < srcBlockIndex)
    11501  {
    11502  return true;
    11503  }
    11504  if(dstBlockIndex > srcBlockIndex)
    11505  {
    11506  return false;
    11507  }
    11508  if(dstOffset < srcOffset)
    11509  {
    11510  return true;
    11511  }
    11512  return false;
    11513 }
    11514 
    11516 // VmaRecorder
    11517 
    11518 #if VMA_RECORDING_ENABLED
    11519 
// Constructs an inactive recorder: no file is open and the timer fields are
// sentinels. Init() must succeed before any Record* method is called.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11528 
// Starts recording: captures the performance-counter baseline, opens the
// output file (binary mode, overwritten), and writes the CSV header with the
// file-format version. Returns VK_ERROR_INITIALIZATION_FAILED if the file
// cannot be opened. Windows-only (fopen_s, QueryPerformanceCounter).
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Timer baseline: all recorded timestamps are seconds since this moment.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        // fopen_s leaves m_File null on failure, so the destructor is safe.
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,4"); // File format version major,minor.

    return VK_SUCCESS;
}
    11550 
// Closes the recording file if Init() opened one.
VmaRecorder::~VmaRecorder()
{
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
    11558 
// Appends a vmaCreateAllocator entry (threadId,time,frame,call) to the
// recording file under the file mutex.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11568 
// Appends a vmaDestroyAllocator entry to the recording file under the file mutex.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11578 
// Appends a vmaCreatePool entry with the full VmaPoolCreateInfo plus the
// resulting pool handle (used as its identifier in later entries).
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11595 
// Appends a vmaDestroyPool entry identifying the pool by handle.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11606 
// Appends a vmaAllocateMemory entry: memory requirements, allocation create
// info (including the user-data string, see UserDataString), and the handle.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11631 
// Appends a vmaAllocateMemoryForBuffer entry; adds the dedicated-allocation
// hints (as 0/1) on top of what RecordAllocateMemory writes.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11660 
// Appends a vmaAllocateMemoryForImage entry; same fields as the buffer
// variant but tagged with the image-specific call name.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11689 
// Appends a vmaFreeMemory entry identifying the allocation by handle.
void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11701 
// Appends a vmaResizeAllocation entry with the allocation handle and the
// requested new size in bytes.
void VmaRecorder::RecordResizeAllocation(
    uint32_t frameIndex,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, newSize);
    Flush();
}
    11715 
// Appends a vmaSetAllocationUserData entry. The user data is written either
// as the string it points to (when the allocation stores user data as a
// string) or as the raw pointer value - see UserDataString.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11732 
// Appends a vmaCreateLostAllocation entry with the new allocation handle.
void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11744 
// Appends a vmaMapMemory entry identifying the allocation by handle.
void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11756 
// Appends a vmaUnmapMemory entry identifying the allocation by handle.
void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11768 
// Appends a vmaFlushAllocation entry with the allocation handle plus the
// flushed byte range (offset, size).
void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11782 
// Appends a vmaInvalidateAllocation entry with the allocation handle plus the
// invalidated byte range (offset, size).
void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11796 
// Appends a vmaCreateBuffer entry: the VkBufferCreateInfo fields needed to
// replay the call, the allocation create info, and the resulting handle.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11822 
// Appends a vmaCreateImage entry: the VkImageCreateInfo fields needed to
// replay the call, the allocation create info, and the resulting handle.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11857 
// Appends a vmaDestroyBuffer entry identifying the allocation by handle.
void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11869 
// Appends a vmaDestroyImage entry identifying the allocation by handle.
void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11881 
// Appends a vmaTouchAllocation entry identifying the allocation by handle.
void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11893 
// Appends a vmaGetAllocationInfo entry identifying the allocation by handle.
void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11905 
// Appends a vmaMakePoolAllocationsLost entry identifying the pool by handle.
void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11917 
    11918 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11919 {
    11920  if(pUserData != VMA_NULL)
    11921  {
    11922  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11923  {
    11924  m_Str = (const char*)pUserData;
    11925  }
    11926  else
    11927  {
    11928  sprintf_s(m_PtrStr, "%p", pUserData);
    11929  m_Str = m_PtrStr;
    11930  }
    11931  }
    11932  else
    11933  {
    11934  m_Str = "";
    11935  }
    11936 }
    11937 
// Writes the "Config,Begin".."Config,End" section of the recording: physical
// device properties and limits, memory heaps/types, enabled extensions, and
// the compile-time VMA_* debug macros - everything a replayer needs to
// reproduce the environment.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heaps and types, one line per property.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration macros that affect allocator behavior.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11983 
// Fills the per-call parameters written at the start of every log line:
// the calling thread's id and the elapsed time in seconds since Init(),
// derived from QueryPerformanceCounter against the baseline captured there.
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}
    11992 
// Flushes the file only when VMA_RECORD_FLUSH_AFTER_CALL_BIT was set in the
// recording settings; otherwise buffered writes are left to stdio.
void VmaRecorder::Flush()
{
    if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    {
        fflush(m_File);
    }
}
    12000 
    12001 #endif // #if VMA_RECORDING_ENABLED
    12002 
    12004 // VmaAllocator_T
    12005 
    12006 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12007  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12008  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12009  m_hDevice(pCreateInfo->device),
    12010  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12011  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12012  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12013  m_PreferredLargeHeapBlockSize(0),
    12014  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12015  m_CurrentFrameIndex(0),
    12016  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12017  m_NextPoolId(0)
    12019  ,m_pRecorder(VMA_NULL)
    12020 #endif
    12021 {
    12022  if(VMA_DEBUG_DETECT_CORRUPTION)
    12023  {
    12024  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12025  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12026  }
    12027 
    12028  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12029 
    12030 #if !(VMA_DEDICATED_ALLOCATION)
    12032  {
    12033  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12034  }
    12035 #endif
    12036 
    12037  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12038  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12039  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12040 
    12041  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12042  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12043 
    12044  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12045  {
    12046  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12047  }
    12048 
    12049  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12050  {
    12051  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12052  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12053  }
    12054 
    12055  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12056 
    12057  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12058  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12059 
    12060  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12061  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12062  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12063  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12064 
    12065  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12066  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12067 
    12068  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12069  {
    12070  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12071  {
    12072  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12073  if(limit != VK_WHOLE_SIZE)
    12074  {
    12075  m_HeapSizeLimit[heapIndex] = limit;
    12076  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12077  {
    12078  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12079  }
    12080  }
    12081  }
    12082  }
    12083 
    12084  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12085  {
    12086  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12087 
    12088  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12089  this,
    12090  memTypeIndex,
    12091  preferredBlockSize,
    12092  0,
    12093  SIZE_MAX,
    12094  GetBufferImageGranularity(),
    12095  pCreateInfo->frameInUseCount,
    12096  false, // isCustomPool
    12097  false, // explicitBlockSize
    12098  false); // linearAlgorithm
    12099  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12100  // becase minBlockCount is 0.
    12101  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12102 
    12103  }
    12104 }
    12105 
    12106 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    12107 {
    12108  VkResult res = VK_SUCCESS;
    12109 
    12110  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    12111  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    12112  {
    12113 #if VMA_RECORDING_ENABLED
    12114  m_pRecorder = vma_new(this, VmaRecorder)();
    12115  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    12116  if(res != VK_SUCCESS)
    12117  {
    12118  return res;
    12119  }
    12120  m_pRecorder->WriteConfiguration(
    12121  m_PhysicalDeviceProperties,
    12122  m_MemProps,
    12123  m_UseKhrDedicatedAllocation);
    12124  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    12125 #else
    12126  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    12127  return VK_ERROR_FEATURE_NOT_PRESENT;
    12128 #endif
    12129  }
    12130 
    12131  return res;
    12132 }
    12133 
    12134 VmaAllocator_T::~VmaAllocator_T()
    12135 {
    12136 #if VMA_RECORDING_ENABLED
    12137  if(m_pRecorder != VMA_NULL)
    12138  {
    12139  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    12140  vma_delete(this, m_pRecorder);
    12141  }
    12142 #endif
    12143 
    12144  VMA_ASSERT(m_Pools.empty());
    12145 
    12146  for(size_t i = GetMemoryTypeCount(); i--; )
    12147  {
    12148  vma_delete(this, m_pDedicatedAllocations[i]);
    12149  vma_delete(this, m_pBlockVectors[i]);
    12150  }
    12151 }
    12152 
// Fills m_VulkanFunctions with pointers to the Vulkan entry points the allocator
// calls. Sources, in order (later overrides earlier):
// 1. Static linking, when VMA_STATIC_VULKAN_FUNCTIONS == 1, including
//    vkGetDeviceProcAddr lookups for the KHR dedicated-allocation pair.
// 2. Pointers the user passed in pVulkanFunctions (may be null; only non-null
//    entries override).
// Ends with asserts that every required pointer is set.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // Extension functions are not exported statically; fetch them from the device.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Helper macro: copy a single user-provided pointer only if it is non-null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    // Only required when the user opted into KHR dedicated allocation.
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12238 
    12239 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12240 {
    12241  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12242  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12243  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12244  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12245 }
    12246 
    12247 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12248  VkDeviceSize size,
    12249  VkDeviceSize alignment,
    12250  bool dedicatedAllocation,
    12251  VkBuffer dedicatedBuffer,
    12252  VkImage dedicatedImage,
    12253  const VmaAllocationCreateInfo& createInfo,
    12254  uint32_t memTypeIndex,
    12255  VmaSuballocationType suballocType,
    12256  VmaAllocation* pAllocation)
    12257 {
    12258  VMA_ASSERT(pAllocation != VMA_NULL);
    12259  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12260 
    12261  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12262 
    12263  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12264  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12265  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12266  {
    12267  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12268  }
    12269 
    12270  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12271  VMA_ASSERT(blockVector);
    12272 
    12273  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12274  bool preferDedicatedMemory =
    12275  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12276  dedicatedAllocation ||
    12277  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12278  size > preferredBlockSize / 2;
    12279 
    12280  if(preferDedicatedMemory &&
    12281  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12282  finalCreateInfo.pool == VK_NULL_HANDLE)
    12283  {
    12285  }
    12286 
    12287  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12288  {
    12289  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12290  {
    12291  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12292  }
    12293  else
    12294  {
    12295  return AllocateDedicatedMemory(
    12296  size,
    12297  suballocType,
    12298  memTypeIndex,
    12299  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12300  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12301  finalCreateInfo.pUserData,
    12302  dedicatedBuffer,
    12303  dedicatedImage,
    12304  pAllocation);
    12305  }
    12306  }
    12307  else
    12308  {
    12309  VkResult res = blockVector->Allocate(
    12310  VK_NULL_HANDLE, // hCurrentPool
    12311  m_CurrentFrameIndex.load(),
    12312  size,
    12313  alignment,
    12314  finalCreateInfo,
    12315  suballocType,
    12316  pAllocation);
    12317  if(res == VK_SUCCESS)
    12318  {
    12319  return res;
    12320  }
    12321 
    12322  // 5. Try dedicated memory.
    12323  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12324  {
    12325  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12326  }
    12327  else
    12328  {
    12329  res = AllocateDedicatedMemory(
    12330  size,
    12331  suballocType,
    12332  memTypeIndex,
    12333  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12334  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12335  finalCreateInfo.pUserData,
    12336  dedicatedBuffer,
    12337  dedicatedImage,
    12338  pAllocation);
    12339  if(res == VK_SUCCESS)
    12340  {
    12341  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12342  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12343  return VK_SUCCESS;
    12344  }
    12345  else
    12346  {
    12347  // Everything failed: Return error code.
    12348  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12349  return res;
    12350  }
    12351  }
    12352  }
    12353 }
    12354 
// Allocates a whole VkDeviceMemory object dedicated to a single allocation
// (not suballocated from a block). Optionally maps it persistently and, when
// VK_KHR_dedicated_allocation is in use, chains VkMemoryDedicatedAllocateInfoKHR
// for the given buffer or image. On success the allocation is registered in
// m_pDedicatedAllocations[memTypeIndex].
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain the dedicated-allocation info so the driver can optimize the
    // memory for exactly this buffer/image. At most one of the two handles
    // may be set.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistent mapping requested: map the whole range now.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the memory just allocated before returning.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12437 
    12438 void VmaAllocator_T::GetBufferMemoryRequirements(
    12439  VkBuffer hBuffer,
    12440  VkMemoryRequirements& memReq,
    12441  bool& requiresDedicatedAllocation,
    12442  bool& prefersDedicatedAllocation) const
    12443 {
    12444 #if VMA_DEDICATED_ALLOCATION
    12445  if(m_UseKhrDedicatedAllocation)
    12446  {
    12447  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12448  memReqInfo.buffer = hBuffer;
    12449 
    12450  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12451 
    12452  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12453  memReq2.pNext = &memDedicatedReq;
    12454 
    12455  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12456 
    12457  memReq = memReq2.memoryRequirements;
    12458  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12459  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12460  }
    12461  else
    12462 #endif // #if VMA_DEDICATED_ALLOCATION
    12463  {
    12464  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12465  requiresDedicatedAllocation = false;
    12466  prefersDedicatedAllocation = false;
    12467  }
    12468 }
    12469 
    12470 void VmaAllocator_T::GetImageMemoryRequirements(
    12471  VkImage hImage,
    12472  VkMemoryRequirements& memReq,
    12473  bool& requiresDedicatedAllocation,
    12474  bool& prefersDedicatedAllocation) const
    12475 {
    12476 #if VMA_DEDICATED_ALLOCATION
    12477  if(m_UseKhrDedicatedAllocation)
    12478  {
    12479  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12480  memReqInfo.image = hImage;
    12481 
    12482  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12483 
    12484  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12485  memReq2.pNext = &memDedicatedReq;
    12486 
    12487  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12488 
    12489  memReq = memReq2.memoryRequirements;
    12490  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12491  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12492  }
    12493  else
    12494 #endif // #if VMA_DEDICATED_ALLOCATION
    12495  {
    12496  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12497  requiresDedicatedAllocation = false;
    12498  prefersDedicatedAllocation = false;
    12499  }
    12500 }
    12501 
    12502 VkResult VmaAllocator_T::AllocateMemory(
    12503  const VkMemoryRequirements& vkMemReq,
    12504  bool requiresDedicatedAllocation,
    12505  bool prefersDedicatedAllocation,
    12506  VkBuffer dedicatedBuffer,
    12507  VkImage dedicatedImage,
    12508  const VmaAllocationCreateInfo& createInfo,
    12509  VmaSuballocationType suballocType,
    12510  VmaAllocation* pAllocation)
    12511 {
    12512  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12513 
    12514  if(vkMemReq.size == 0)
    12515  {
    12516  return VK_ERROR_VALIDATION_FAILED_EXT;
    12517  }
    12518  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12519  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12520  {
    12521  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12522  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12523  }
    12524  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12526  {
    12527  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12528  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12529  }
    12530  if(requiresDedicatedAllocation)
    12531  {
    12532  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12533  {
    12534  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12535  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12536  }
    12537  if(createInfo.pool != VK_NULL_HANDLE)
    12538  {
    12539  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12540  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12541  }
    12542  }
    12543  if((createInfo.pool != VK_NULL_HANDLE) &&
    12544  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12545  {
    12546  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12547  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12548  }
    12549 
    12550  if(createInfo.pool != VK_NULL_HANDLE)
    12551  {
    12552  const VkDeviceSize alignmentForPool = VMA_MAX(
    12553  vkMemReq.alignment,
    12554  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12555  return createInfo.pool->m_BlockVector.Allocate(
    12556  createInfo.pool,
    12557  m_CurrentFrameIndex.load(),
    12558  vkMemReq.size,
    12559  alignmentForPool,
    12560  createInfo,
    12561  suballocType,
    12562  pAllocation);
    12563  }
    12564  else
    12565  {
    12566  // Bit mask of memory Vulkan types acceptable for this allocation.
    12567  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12568  uint32_t memTypeIndex = UINT32_MAX;
    12569  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12570  if(res == VK_SUCCESS)
    12571  {
    12572  VkDeviceSize alignmentForMemType = VMA_MAX(
    12573  vkMemReq.alignment,
    12574  GetMemoryTypeMinAlignment(memTypeIndex));
    12575 
    12576  res = AllocateMemoryOfType(
    12577  vkMemReq.size,
    12578  alignmentForMemType,
    12579  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12580  dedicatedBuffer,
    12581  dedicatedImage,
    12582  createInfo,
    12583  memTypeIndex,
    12584  suballocType,
    12585  pAllocation);
    12586  // Succeeded on first try.
    12587  if(res == VK_SUCCESS)
    12588  {
    12589  return res;
    12590  }
    12591  // Allocation from this memory type failed. Try other compatible memory types.
    12592  else
    12593  {
    12594  for(;;)
    12595  {
    12596  // Remove old memTypeIndex from list of possibilities.
    12597  memoryTypeBits &= ~(1u << memTypeIndex);
    12598  // Find alternative memTypeIndex.
    12599  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12600  if(res == VK_SUCCESS)
    12601  {
    12602  alignmentForMemType = VMA_MAX(
    12603  vkMemReq.alignment,
    12604  GetMemoryTypeMinAlignment(memTypeIndex));
    12605 
    12606  res = AllocateMemoryOfType(
    12607  vkMemReq.size,
    12608  alignmentForMemType,
    12609  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12610  dedicatedBuffer,
    12611  dedicatedImage,
    12612  createInfo,
    12613  memTypeIndex,
    12614  suballocType,
    12615  pAllocation);
    12616  // Allocation from this alternative memory type succeeded.
    12617  if(res == VK_SUCCESS)
    12618  {
    12619  return res;
    12620  }
    12621  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12622  }
    12623  // No other matching memory type index could be found.
    12624  else
    12625  {
    12626  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12627  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12628  }
    12629  }
    12630  }
    12631  }
    12632  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12633  else
    12634  return res;
    12635  }
    12636 }
    12637 
    12638 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12639 {
    12640  VMA_ASSERT(allocation);
    12641 
    12642  if(TouchAllocation(allocation))
    12643  {
    12644  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12645  {
    12646  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12647  }
    12648 
    12649  switch(allocation->GetType())
    12650  {
    12651  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12652  {
    12653  VmaBlockVector* pBlockVector = VMA_NULL;
    12654  VmaPool hPool = allocation->GetPool();
    12655  if(hPool != VK_NULL_HANDLE)
    12656  {
    12657  pBlockVector = &hPool->m_BlockVector;
    12658  }
    12659  else
    12660  {
    12661  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12662  pBlockVector = m_pBlockVectors[memTypeIndex];
    12663  }
    12664  pBlockVector->Free(allocation);
    12665  }
    12666  break;
    12667  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12668  FreeDedicatedMemory(allocation);
    12669  break;
    12670  default:
    12671  VMA_ASSERT(0);
    12672  }
    12673  }
    12674 
    12675  allocation->SetUserData(this, VMA_NULL);
    12676  vma_delete(this, allocation);
    12677 }
    12678 
    12679 VkResult VmaAllocator_T::ResizeAllocation(
    12680  const VmaAllocation alloc,
    12681  VkDeviceSize newSize)
    12682 {
    12683  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12684  {
    12685  return VK_ERROR_VALIDATION_FAILED_EXT;
    12686  }
    12687  if(newSize == alloc->GetSize())
    12688  {
    12689  return VK_SUCCESS;
    12690  }
    12691 
    12692  switch(alloc->GetType())
    12693  {
    12694  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12695  return VK_ERROR_FEATURE_NOT_PRESENT;
    12696  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12697  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12698  {
    12699  alloc->ChangeSize(newSize);
    12700  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12701  return VK_SUCCESS;
    12702  }
    12703  else
    12704  {
    12705  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12706  }
    12707  default:
    12708  VMA_ASSERT(0);
    12709  return VK_ERROR_VALIDATION_FAILED_EXT;
    12710  }
    12711 }
    12712 
    12713 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12714 {
    12715  // Initialize.
    12716  InitStatInfo(pStats->total);
    12717  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12718  InitStatInfo(pStats->memoryType[i]);
    12719  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12720  InitStatInfo(pStats->memoryHeap[i]);
    12721 
    12722  // Process default pools.
    12723  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12724  {
    12725  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12726  VMA_ASSERT(pBlockVector);
    12727  pBlockVector->AddStats(pStats);
    12728  }
    12729 
    12730  // Process custom pools.
    12731  {
    12732  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12733  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12734  {
    12735  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12736  }
    12737  }
    12738 
    12739  // Process dedicated allocations.
    12740  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12741  {
    12742  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12743  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12744  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12745  VMA_ASSERT(pDedicatedAllocVector);
    12746  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12747  {
    12748  VmaStatInfo allocationStatInfo;
    12749  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12750  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12751  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12752  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12753  }
    12754  }
    12755 
    12756  // Postprocess.
    12757  VmaPostprocessCalcStatInfo(pStats->total);
    12758  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12759  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12760  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12761  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12762 }
    12763 
    12764 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12765 
    12766 VkResult VmaAllocator_T::Defragment(
    12767  VmaAllocation* pAllocations,
    12768  size_t allocationCount,
    12769  VkBool32* pAllocationsChanged,
    12770  const VmaDefragmentationInfo* pDefragmentationInfo,
    12771  VmaDefragmentationStats* pDefragmentationStats)
    12772 {
    12773  if(pAllocationsChanged != VMA_NULL)
    12774  {
    12775  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    12776  }
    12777  if(pDefragmentationStats != VMA_NULL)
    12778  {
    12779  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12780  }
    12781 
    12782  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12783 
    12784  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12785 
    12786  const size_t poolCount = m_Pools.size();
    12787 
    12788  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12789  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12790  {
    12791  VmaAllocation hAlloc = pAllocations[allocIndex];
    12792  VMA_ASSERT(hAlloc);
    12793  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12794  // DedicatedAlloc cannot be defragmented.
    12795  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12796  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12797  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12798  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12799  // Lost allocation cannot be defragmented.
    12800  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12801  {
    12802  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12803 
    12804  const VmaPool hAllocPool = hAlloc->GetPool();
    12805  // This allocation belongs to custom pool.
    12806  if(hAllocPool != VK_NULL_HANDLE)
    12807  {
    12808  // Pools with linear or buddy algorithm are not defragmented.
    12809  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12810  {
    12811  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12812  }
    12813  }
    12814  // This allocation belongs to general pool.
    12815  else
    12816  {
    12817  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12818  }
    12819 
    12820  if(pAllocBlockVector != VMA_NULL)
    12821  {
    12822  VmaDefragmentator* const pDefragmentator =
    12823  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12824  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12825  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12826  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12827  }
    12828  }
    12829  }
    12830 
    12831  VkResult result = VK_SUCCESS;
    12832 
    12833  // ======== Main processing.
    12834 
    12835  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12836  uint32_t maxAllocationsToMove = UINT32_MAX;
    12837  if(pDefragmentationInfo != VMA_NULL)
    12838  {
    12839  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12840  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12841  }
    12842 
    12843  // Process standard memory.
    12844  for(uint32_t memTypeIndex = 0;
    12845  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12846  ++memTypeIndex)
    12847  {
    12848  // Only HOST_VISIBLE memory types can be defragmented.
    12849  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12850  {
    12851  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12852  pDefragmentationStats,
    12853  maxBytesToMove,
    12854  maxAllocationsToMove);
    12855  }
    12856  }
    12857 
    12858  // Process custom pools.
    12859  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12860  {
    12861  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12862  pDefragmentationStats,
    12863  maxBytesToMove,
    12864  maxAllocationsToMove);
    12865  }
    12866 
    12867  // ======== Destroy defragmentators.
    12868 
    12869  // Process custom pools.
    12870  for(size_t poolIndex = poolCount; poolIndex--; )
    12871  {
    12872  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12873  }
    12874 
    12875  // Process standard memory.
    12876  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12877  {
    12878  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12879  {
    12880  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12881  }
    12882  }
    12883 
    12884  return result;
    12885 }
    12886 
// Fills *pAllocationInfo with the current parameters of the allocation.
// For allocations that can become lost this also "touches" the allocation:
// its last-use frame index is advanced to the current frame so it is treated
// as used in this frame by the lost-allocation machinery.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Lock-free CAS loop: each iteration either observes the allocation as
        // lost, observes it already touched in the current frame, or attempts
        // to advance its last-use frame index itself.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report only size and user data, with no
                // memory binding.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Touched in the current frame: parameters are stable to read.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to claim the touch. On CAS failure another thread changed
                // the index concurrently; loop and re-evaluate. (Presumably the
                // helper refreshes localLastUseFrameIndex on failure, as with
                // compare_exchange semantics - TODO confirm.)
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds still advance the last-use frame index so that
        // usage is reflected in dumps, even though this allocation can never
        // become lost (hence the assert).
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12958 
// Returns true if the allocation is not lost, and marks it as used in the
// current frame. Uses the same lock-free frame-index CAS protocol as
// GetAllocationInfo, without filling an info struct.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Already lost - cannot be used anymore.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched in this frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use frame index; on CAS failure,
                // loop and re-evaluate against the concurrently-updated value.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Statistics builds record the touch even though this allocation can
        // never become lost (hence the assert).
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    13010 
    13011 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13012 {
    13013  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13014 
    13015  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13016 
    13017  if(newCreateInfo.maxBlockCount == 0)
    13018  {
    13019  newCreateInfo.maxBlockCount = SIZE_MAX;
    13020  }
    13021  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13022  {
    13023  return VK_ERROR_INITIALIZATION_FAILED;
    13024  }
    13025 
    13026  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13027 
    13028  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13029 
    13030  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13031  if(res != VK_SUCCESS)
    13032  {
    13033  vma_delete(this, *pPool);
    13034  *pPool = VMA_NULL;
    13035  return res;
    13036  }
    13037 
    13038  // Add to m_Pools.
    13039  {
    13040  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13041  (*pPool)->SetId(m_NextPoolId++);
    13042  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13043  }
    13044 
    13045  return VK_SUCCESS;
    13046 }
    13047 
    13048 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13049 {
    13050  // Remove from m_Pools.
    13051  {
    13052  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13053  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13054  VMA_ASSERT(success && "Pool not found in Allocator.");
    13055  }
    13056 
    13057  vma_delete(this, pool);
    13058 }
    13059 
// Retrieves statistics of a single custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13064 
// Atomically publishes the new frame index, read by the lost-allocation and
// touch logic on other threads.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    13069 
// Forcibly marks allocations in the given pool as lost, delegating to the
// pool's block vector with the current frame index. Optionally reports how
// many allocations were affected via pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    13078 
// Runs corruption detection over the given custom pool by delegating to its
// block vector. Returns VK_ERROR_FEATURE_NOT_PRESENT when detection is not
// enabled for it (see CheckCorruption below for the convention).
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    13083 
    13084 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13085 {
    13086  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13087 
    13088  // Process default pools.
    13089  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13090  {
    13091  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13092  {
    13093  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13094  VMA_ASSERT(pBlockVector);
    13095  VkResult localRes = pBlockVector->CheckCorruption();
    13096  switch(localRes)
    13097  {
    13098  case VK_ERROR_FEATURE_NOT_PRESENT:
    13099  break;
    13100  case VK_SUCCESS:
    13101  finalRes = VK_SUCCESS;
    13102  break;
    13103  default:
    13104  return localRes;
    13105  }
    13106  }
    13107  }
    13108 
    13109  // Process custom pools.
    13110  {
    13111  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13112  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13113  {
    13114  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13115  {
    13116  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13117  switch(localRes)
    13118  {
    13119  case VK_ERROR_FEATURE_NOT_PRESENT:
    13120  break;
    13121  case VK_SUCCESS:
    13122  finalRes = VK_SUCCESS;
    13123  break;
    13124  default:
    13125  return localRes;
    13126  }
    13127  }
    13128  }
    13129  }
    13130 
    13131  return finalRes;
    13132 }
    13133 
// Creates a placeholder allocation that starts out in the lost state
// (frame index VMA_FRAME_INDEX_LOST, no user-data string ownership).
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    13139 
    13140 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    13141 {
    13142  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    13143 
    13144  VkResult res;
    13145  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13146  {
    13147  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13148  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    13149  {
    13150  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13151  if(res == VK_SUCCESS)
    13152  {
    13153  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    13154  }
    13155  }
    13156  else
    13157  {
    13158  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    13159  }
    13160  }
    13161  else
    13162  {
    13163  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13164  }
    13165 
    13166  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    13167  {
    13168  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    13169  }
    13170 
    13171  return res;
    13172 }
    13173 
// Frees raw VkDeviceMemory: notifies the user callback first (while the
// handle is still valid), frees the memory, then returns the bytes to the
// per-heap budget if that heap is limited.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // VK_WHOLE_SIZE means this heap has no artificial limit configured.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    13190 
    13191 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    13192 {
    13193  if(hAllocation->CanBecomeLost())
    13194  {
    13195  return VK_ERROR_MEMORY_MAP_FAILED;
    13196  }
    13197 
    13198  switch(hAllocation->GetType())
    13199  {
    13200  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13201  {
    13202  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13203  char *pBytes = VMA_NULL;
    13204  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    13205  if(res == VK_SUCCESS)
    13206  {
    13207  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    13208  hAllocation->BlockAllocMap();
    13209  }
    13210  return res;
    13211  }
    13212  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13213  return hAllocation->DedicatedAllocMap(this, ppData);
    13214  default:
    13215  VMA_ASSERT(0);
    13216  return VK_ERROR_MEMORY_MAP_FAILED;
    13217  }
    13218 }
    13219 
    13220 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13221 {
    13222  switch(hAllocation->GetType())
    13223  {
    13224  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13225  {
    13226  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13227  hAllocation->BlockAllocUnmap();
    13228  pBlock->Unmap(this, 1);
    13229  }
    13230  break;
    13231  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13232  hAllocation->DedicatedAllocUnmap(this);
    13233  break;
    13234  default:
    13235  VMA_ASSERT(0);
    13236  }
    13237 }
    13238 
    13239 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13240 {
    13241  VkResult res = VK_SUCCESS;
    13242  switch(hAllocation->GetType())
    13243  {
    13244  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13245  res = GetVulkanFunctions().vkBindBufferMemory(
    13246  m_hDevice,
    13247  hBuffer,
    13248  hAllocation->GetMemory(),
    13249  0); //memoryOffset
    13250  break;
    13251  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13252  {
    13253  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13254  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13255  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13256  break;
    13257  }
    13258  default:
    13259  VMA_ASSERT(0);
    13260  }
    13261  return res;
    13262 }
    13263 
    13264 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13265 {
    13266  VkResult res = VK_SUCCESS;
    13267  switch(hAllocation->GetType())
    13268  {
    13269  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13270  res = GetVulkanFunctions().vkBindImageMemory(
    13271  m_hDevice,
    13272  hImage,
    13273  hAllocation->GetMemory(),
    13274  0); //memoryOffset
    13275  break;
    13276  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13277  {
    13278  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13279  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13280  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13281  break;
    13282  }
    13283  default:
    13284  VMA_ASSERT(0);
    13285  }
    13286  return res;
    13287 }
    13288 
// Flushes or invalidates the host cache for a sub-range of the allocation.
// Only acts when the memory type is non-coherent (coherent memory needs no
// explicit flush/invalidate, so the call is then a no-op).
// offset/size are relative to the allocation; size may be VK_WHOLE_SIZE.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        // Vulkan requires mapped-range offset and size to be multiples of this
        // limit (or size reaching the end of the memory object).
        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocation starts at offset 0 of its own VkDeviceMemory:
            // align the requested range outward, clamped to the allocation size.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            // Asserted invariant: sub-allocations within a block start at a
            // nonCoherentAtomSize-aligned offset, so adding the offsets keeps
            // memRange.offset properly aligned.
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            // Clamp so the aligned-up range never exceeds the block.
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13364 
// Destroys a dedicated (non-block) allocation: unregisters it from the
// per-memory-type dedicated-allocations list, then frees its VkDeviceMemory.
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Lock scope limited to the list manipulation; the actual free below
        // happens outside the mutex.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    13394 
    13395 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13396 {
    13397  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13398  !hAllocation->CanBecomeLost() &&
    13399  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13400  {
    13401  void* pData = VMA_NULL;
    13402  VkResult res = Map(hAllocation, &pData);
    13403  if(res == VK_SUCCESS)
    13404  {
    13405  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13406  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13407  Unmap(hAllocation);
    13408  }
    13409  else
    13410  {
    13411  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13412  }
    13413  }
    13414 }
    13415 
    13416 #if VMA_STATS_STRING_ENABLED
    13417 
// Writes the "DedicatedAllocations", "DefaultPools" and "Pools" sections of
// the JSON statistics dump into an already-open JSON object. Each section is
// opened lazily so it is omitted entirely when it would be empty.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Dedicated allocations, grouped by memory type.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            // Key is "Type <memTypeIndex>".
            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default (per-memory-type) block vectors.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Keys are pool IDs.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13503 
    13504 #endif // #if VMA_STATS_STRING_ENABLED
    13505 
    13507 // Public interface
    13508 
// Public entry point: allocates the allocator object using the user-supplied
// allocation callbacks, then runs second-phase initialization.
// NOTE(review): if Init() fails, *pAllocator still points at the partially
// initialized object - presumably the caller is expected to pass it to
// vmaDestroyAllocator; confirm against the library's documented contract.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13518 
// Public entry point: destroys the allocator. Passing VK_NULL_HANDLE is a
// valid no-op.
void vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        // Copy the callbacks out first: they are stored inside the allocator
        // object and must remain usable while vma_delete destroys and then
        // deallocates that very object.
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
        vma_delete(&allocationCallbacks, allocator);
    }
}
    13529 
    13531  VmaAllocator allocator,
    13532  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13533 {
    13534  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13535  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13536 }
    13537 
    13539  VmaAllocator allocator,
    13540  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13541 {
    13542  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13543  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13544 }
    13545 
    13547  VmaAllocator allocator,
    13548  uint32_t memoryTypeIndex,
    13549  VkMemoryPropertyFlags* pFlags)
    13550 {
    13551  VMA_ASSERT(allocator && pFlags);
    13552  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13553  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13554 }
    13555 
    13557  VmaAllocator allocator,
    13558  uint32_t frameIndex)
    13559 {
    13560  VMA_ASSERT(allocator);
    13561  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13562 
    13563  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13564 
    13565  allocator->SetCurrentFrameIndex(frameIndex);
    13566 }
    13567 
// Public entry point: computes aggregated statistics across the allocator's
// memory types, heaps and pools into *pStats.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13576 
    13577 #if VMA_STATS_STRING_ENABLED
    13578 
// Public entry point: builds a null-terminated JSON statistics string.
// The returned buffer is allocated with the allocator's CPU allocation
// callbacks and must be released with vmaFreeStatsString.
// detailedMap == VK_TRUE additionally appends the full per-allocation map.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scoped so the JSON writer is destroyed (and output finalized)
        // before the string builder is read below.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per heap, each listing its size, flags, aggregated
        // stats, and the memory types that live in it.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // "Stats" is written only for heaps that hold any blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                // Nest each memory type under the heap it belongs to.
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy out into a buffer sized exactly length + terminating zero;
    // vmaFreeStatsString recomputes this size via strlen.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13686 
    13687 void vmaFreeStatsString(
    13688  VmaAllocator allocator,
    13689  char* pStatsString)
    13690 {
    13691  if(pStatsString != VMA_NULL)
    13692  {
    13693  VMA_ASSERT(allocator);
    13694  size_t len = strlen(pStatsString);
    13695  vma_delete_array(allocator, pStatsString, len + 1);
    13696  }
    13697 }
    13698 
    13699 #endif // #if VMA_STATS_STRING_ENABLED
    13700 
    13701 /*
    13702 This function is not protected by any mutex because it just reads immutable data.
    13703 */
    // Chooses the lowest-cost Vulkan memory type index satisfying memoryTypeBits,
    // requiredFlags and pAllocationCreateInfo->usage. Cost = number of preferredFlags
    // bits missing from a candidate type. Returns VK_ERROR_FEATURE_NOT_PRESENT when
    // no acceptable type exists.
    13704 VkResult vmaFindMemoryTypeIndex(
    13705  VmaAllocator allocator,
    13706  uint32_t memoryTypeBits,
    13707  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13708  uint32_t* pMemoryTypeIndex)
    13709 {
    13710  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13711  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13712  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13713 
    13714  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13715  {
    13716  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; // caller can further restrict acceptable types
    13717  }
    13718 
    13719  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13720  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13721 
    13722  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13723  if(mapped)
    13724  {
    13725  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; // persistent mapping needs host-visible memory
    13726  }
    13727 
    13728  // Convert usage to requiredFlags and preferredFlags.
    // NOTE(review): the case labels (listing lines 13731/13733/13739/13742/13749) are
    // elided from this listing — presumably the VMA_MEMORY_USAGE_* enumerators; confirm
    // against the full vk_mem_alloc.h source.
    13729  switch(pAllocationCreateInfo->usage)
    13730  {
    13732  break;
    13734  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13735  {
    13736  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13737  }
    13738  break;
    13740  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13741  break;
    13743  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13744  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13745  {
    13746  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13747  }
    13748  break;
    13750  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13751  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13752  break;
    13753  default:
    13754  break;
    13755  }
    13756 
    13757  *pMemoryTypeIndex = UINT32_MAX;
    13758  uint32_t minCost = UINT32_MAX;
    13759  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13760  memTypeIndex < allocator->GetMemoryTypeCount();
    13761  ++memTypeIndex, memTypeBit <<= 1)
    13762  {
    13763  // This memory type is acceptable according to memoryTypeBits bitmask.
    13764  if((memTypeBit & memoryTypeBits) != 0)
    13765  {
    13766  const VkMemoryPropertyFlags currFlags =
    13767  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13768  // This memory type contains requiredFlags.
    13769  if((requiredFlags & ~currFlags) == 0)
    13770  {
    13771  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13772  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13773  // Remember memory type with lowest cost.
    13774  if(currCost < minCost)
    13775  {
    13776  *pMemoryTypeIndex = memTypeIndex;
    13777  if(currCost == 0)
    13778  {
    13779  return VK_SUCCESS; // perfect match — no better candidate possible
    13780  }
    13781  minCost = currCost;
    13782  }
    13783  }
    13784  }
    13785  }
    13786  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13787 }
    13788 
    // NOTE(review): the signature line (listing 13789) is elided from this listing —
    // presumably `VkResult vmaFindMemoryTypeIndexForBufferInfo(`; confirm against the full source.
    // Creates a temporary VkBuffer solely to query its memory requirements, then delegates
    // to vmaFindMemoryTypeIndex() and destroys the buffer.
    13790  VmaAllocator allocator,
    13791  const VkBufferCreateInfo* pBufferCreateInfo,
    13792  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13793  uint32_t* pMemoryTypeIndex)
    13794 {
    13795  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13796  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13797  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13798  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13799 
    13800  const VkDevice hDev = allocator->m_hDevice;
    13801  VkBuffer hBuffer = VK_NULL_HANDLE;
    13802  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13803  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13804  if(res == VK_SUCCESS)
    13805  {
    13806  VkMemoryRequirements memReq = {};
    13807  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13808  hDev, hBuffer, &memReq);
    13809 
    13810  res = vmaFindMemoryTypeIndex(
    13811  allocator,
    13812  memReq.memoryTypeBits,
    13813  pAllocationCreateInfo,
    13814  pMemoryTypeIndex);
    13815 
    13816  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13817  hDev, hBuffer, allocator->GetAllocationCallbacks()); // buffer was only needed for the query
    13818  }
    13819  return res;
    13820 }
    13821 
    // NOTE(review): the signature line (listing 13822) is elided from this listing —
    // presumably `VkResult vmaFindMemoryTypeIndexForImageInfo(`; confirm against the full source.
    // Creates a temporary VkImage solely to query its memory requirements, then delegates
    // to vmaFindMemoryTypeIndex() and destroys the image.
    13823  VmaAllocator allocator,
    13824  const VkImageCreateInfo* pImageCreateInfo,
    13825  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13826  uint32_t* pMemoryTypeIndex)
    13827 {
    13828  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13829  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13830  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13831  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13832 
    13833  const VkDevice hDev = allocator->m_hDevice;
    13834  VkImage hImage = VK_NULL_HANDLE;
    13835  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13836  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13837  if(res == VK_SUCCESS)
    13838  {
    13839  VkMemoryRequirements memReq = {};
    13840  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13841  hDev, hImage, &memReq);
    13842 
    13843  res = vmaFindMemoryTypeIndex(
    13844  allocator,
    13845  memReq.memoryTypeBits,
    13846  pAllocationCreateInfo,
    13847  pMemoryTypeIndex);
    13848 
    13849  allocator->GetVulkanFunctions().vkDestroyImage(
    13850  hDev, hImage, allocator->GetAllocationCallbacks()); // image was only needed for the query
    13851  }
    13852  return res;
    13853 }
    13854 
    // Creates a custom memory pool; forwards to the allocator and, when recording is
    // enabled, logs the call for later replay.
    13855 VkResult vmaCreatePool(
    13856  VmaAllocator allocator,
    13857  const VmaPoolCreateInfo* pCreateInfo,
    13858  VmaPool* pPool)
    13859 {
    13860  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13861 
    13862  VMA_DEBUG_LOG("vmaCreatePool");
    13863 
    13864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13865 
    13866  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13867 
    13868 #if VMA_RECORDING_ENABLED
    13869  if(allocator->GetRecorder() != VMA_NULL)
    13870  {
    13871  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13872  }
    13873 #endif
    13874 
    13875  return res;
    13876 }
    13877 
    // Destroys a custom memory pool. A null pool handle is a no-op, mirroring
    // Vulkan's vkDestroy* convention.
    13878 void vmaDestroyPool(
    13879  VmaAllocator allocator,
    13880  VmaPool pool)
    13881 {
    13882  VMA_ASSERT(allocator);
    13883 
    13884  if(pool == VK_NULL_HANDLE)
    13885  {
    13886  return;
    13887  }
    13888 
    13889  VMA_DEBUG_LOG("vmaDestroyPool");
    13890 
    13891  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13892 
    13893 #if VMA_RECORDING_ENABLED
    13894  if(allocator->GetRecorder() != VMA_NULL)
    13895  {
    13896  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13897  }
    13898 #endif
    13899 
    13900  allocator->DestroyPool(pool);
    13901 }
    13902 
    // Fills *pPoolStats with current statistics of the given pool.
    13903 void vmaGetPoolStats(
    13904  VmaAllocator allocator,
    13905  VmaPool pool,
    13906  VmaPoolStats* pPoolStats)
    13907 {
    13908  VMA_ASSERT(allocator && pool && pPoolStats);
    13909 
    13910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13911 
    13912  allocator->GetPoolStats(pool, pPoolStats);
    13913 }
    13914 
    // NOTE(review): the signature line (listing 13915) is elided from this listing —
    // presumably `void vmaMakePoolAllocationsLost(`; confirm against the full source.
    // Marks eligible allocations in the pool as lost; pLostAllocationCount may be null.
    13916  VmaAllocator allocator,
    13917  VmaPool pool,
    13918  size_t* pLostAllocationCount)
    13919 {
    13920  VMA_ASSERT(allocator && pool);
    13921 
    13922  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13923 
    13924 #if VMA_RECORDING_ENABLED
    13925  if(allocator->GetRecorder() != VMA_NULL)
    13926  {
    13927  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13928  }
    13929 #endif
    13930 
    13931  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13932 }
    13933 
    // Validates margin bytes of all allocations in the pool; returns the allocator's
    // corruption-check result.
    13934 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13935 {
    13936  VMA_ASSERT(allocator && pool);
    13937 
    13938  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13939 
    13940  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13941 
    13942  return allocator->CheckPoolCorruption(pool);
    13943 }
    13944 
    // General-purpose memory allocation from explicit VkMemoryRequirements (no dedicated
    // buffer/image hint). pAllocationInfo is optional and only filled on success.
    13945 VkResult vmaAllocateMemory(
    13946  VmaAllocator allocator,
    13947  const VkMemoryRequirements* pVkMemoryRequirements,
    13948  const VmaAllocationCreateInfo* pCreateInfo,
    13949  VmaAllocation* pAllocation,
    13950  VmaAllocationInfo* pAllocationInfo)
    13951 {
    13952  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13953 
    13954  VMA_DEBUG_LOG("vmaAllocateMemory");
    13955 
    13956  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13957 
    13958  VkResult result = allocator->AllocateMemory(
    13959  *pVkMemoryRequirements,
    13960  false, // requiresDedicatedAllocation
    13961  false, // prefersDedicatedAllocation
    13962  VK_NULL_HANDLE, // dedicatedBuffer
    13963  VK_NULL_HANDLE, // dedicatedImage
    13964  *pCreateInfo,
    13965  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13966  pAllocation);
    13967 
    13968 #if VMA_RECORDING_ENABLED
    13969  if(allocator->GetRecorder() != VMA_NULL)
    13970  {
    13971  allocator->GetRecorder()->RecordAllocateMemory(
    13972  allocator->GetCurrentFrameIndex(),
    13973  *pVkMemoryRequirements,
    13974  *pCreateInfo,
    13975  *pAllocation);
    13976  }
    13977 #endif
    13978 
    13979  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13980  {
    13981  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13982  }
    13983 
    13984  return result;
    13985 }
    13986 
    // NOTE(review): the signature line (listing 13987) is elided from this listing —
    // presumably `VkResult vmaAllocateMemoryForBuffer(` (matches the VMA_DEBUG_LOG below);
    // confirm against the full source.
    // Queries the buffer's memory requirements (including dedicated-allocation hints)
    // and allocates matching memory. Does NOT bind the buffer.
    13988  VmaAllocator allocator,
    13989  VkBuffer buffer,
    13990  const VmaAllocationCreateInfo* pCreateInfo,
    13991  VmaAllocation* pAllocation,
    13992  VmaAllocationInfo* pAllocationInfo)
    13993 {
    13994  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13995 
    13996  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13997 
    13998  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13999 
    14000  VkMemoryRequirements vkMemReq = {};
    14001  bool requiresDedicatedAllocation = false;
    14002  bool prefersDedicatedAllocation = false;
    14003  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14004  requiresDedicatedAllocation,
    14005  prefersDedicatedAllocation);
    14006 
    14007  VkResult result = allocator->AllocateMemory(
    14008  vkMemReq,
    14009  requiresDedicatedAllocation,
    14010  prefersDedicatedAllocation,
    14011  buffer, // dedicatedBuffer
    14012  VK_NULL_HANDLE, // dedicatedImage
    14013  *pCreateInfo,
    14014  VMA_SUBALLOCATION_TYPE_BUFFER,
    14015  pAllocation);
    14016 
    14017 #if VMA_RECORDING_ENABLED
    14018  if(allocator->GetRecorder() != VMA_NULL)
    14019  {
    14020  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14021  allocator->GetCurrentFrameIndex(),
    14022  vkMemReq,
    14023  requiresDedicatedAllocation,
    14024  prefersDedicatedAllocation,
    14025  *pCreateInfo,
    14026  *pAllocation);
    14027  }
    14028 #endif
    14029 
    14030  if(pAllocationInfo && result == VK_SUCCESS)
    14031  {
    14032  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14033  }
    14034 
    14035  return result;
    14036 }
    14037 
    // Queries the image's memory requirements (including dedicated-allocation hints)
    // and allocates matching memory. Does NOT bind the image.
    14038 VkResult vmaAllocateMemoryForImage(
    14039  VmaAllocator allocator,
    14040  VkImage image,
    14041  const VmaAllocationCreateInfo* pCreateInfo,
    14042  VmaAllocation* pAllocation,
    14043  VmaAllocationInfo* pAllocationInfo)
    14044 {
    14045  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14046 
    14047  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    14048 
    14049  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14050 
    14051  VkMemoryRequirements vkMemReq = {};
    14052  bool requiresDedicatedAllocation = false;
    14053  bool prefersDedicatedAllocation = false;
    14054  allocator->GetImageMemoryRequirements(image, vkMemReq,
    14055  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14056 
    14057  VkResult result = allocator->AllocateMemory(
    14058  vkMemReq,
    14059  requiresDedicatedAllocation,
    14060  prefersDedicatedAllocation,
    14061  VK_NULL_HANDLE, // dedicatedBuffer
    14062  image, // dedicatedImage
    14063  *pCreateInfo,
    14064  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, // tiling unknown here, unlike vmaCreateImage
    14065  pAllocation);
    14066 
    14067 #if VMA_RECORDING_ENABLED
    14068  if(allocator->GetRecorder() != VMA_NULL)
    14069  {
    14070  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    14071  allocator->GetCurrentFrameIndex(),
    14072  vkMemReq,
    14073  requiresDedicatedAllocation,
    14074  prefersDedicatedAllocation,
    14075  *pCreateInfo,
    14076  *pAllocation);
    14077  }
    14078 #endif
    14079 
    14080  if(pAllocationInfo && result == VK_SUCCESS)
    14081  {
    14082  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14083  }
    14084 
    14085  return result;
    14086 }
    14087 
    // Frees memory previously allocated by any vmaAllocateMemory* call.
    // A null allocation is a no-op.
    14088 void vmaFreeMemory(
    14089  VmaAllocator allocator,
    14090  VmaAllocation allocation)
    14091 {
    14092  VMA_ASSERT(allocator);
    14093 
    14094  if(allocation == VK_NULL_HANDLE)
    14095  {
    14096  return;
    14097  }
    14098 
    14099  VMA_DEBUG_LOG("vmaFreeMemory");
    14100 
    14101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14102 
    14103 #if VMA_RECORDING_ENABLED
    14104  if(allocator->GetRecorder() != VMA_NULL)
    14105  {
    14106  allocator->GetRecorder()->RecordFreeMemory(
    14107  allocator->GetCurrentFrameIndex(),
    14108  allocation);
    14109  }
    14110 #endif
    14111 
    14112  allocator->FreeMemory(allocation);
    14113 }
    14114 
    // Attempts to change the size of an existing allocation in place;
    // returns the allocator's result (fails if the new size cannot be accommodated).
    14115 VkResult vmaResizeAllocation(
    14116  VmaAllocator allocator,
    14117  VmaAllocation allocation,
    14118  VkDeviceSize newSize)
    14119 {
    14120  VMA_ASSERT(allocator && allocation);
    14121 
    14122  VMA_DEBUG_LOG("vmaResizeAllocation");
    14123 
    14124  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14125 
    14126 #if VMA_RECORDING_ENABLED
    14127  if(allocator->GetRecorder() != VMA_NULL)
    14128  {
    14129  allocator->GetRecorder()->RecordResizeAllocation(
    14130  allocator->GetCurrentFrameIndex(),
    14131  allocation,
    14132  newSize);
    14133  }
    14134 #endif
    14135 
    14136  return allocator->ResizeAllocation(allocation, newSize);
    14137 }
    14138 
    // NOTE(review): the signature line (listing 14139) is elided from this listing —
    // presumably `void vmaGetAllocationInfo(`; confirm against the full source.
    // Fills *pAllocationInfo with current info about the allocation.
    14140  VmaAllocator allocator,
    14141  VmaAllocation allocation,
    14142  VmaAllocationInfo* pAllocationInfo)
    14143 {
    14144  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14145 
    14146  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14147 
    14148 #if VMA_RECORDING_ENABLED
    14149  if(allocator->GetRecorder() != VMA_NULL)
    14150  {
    14151  allocator->GetRecorder()->RecordGetAllocationInfo(
    14152  allocator->GetCurrentFrameIndex(),
    14153  allocation);
    14154  }
    14155 #endif
    14156 
    14157  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14158 }
    14159 
    // Updates the allocation's last-use frame index; returns whether the
    // allocation is still valid (not lost).
    14160 VkBool32 vmaTouchAllocation(
    14161  VmaAllocator allocator,
    14162  VmaAllocation allocation)
    14163 {
    14164  VMA_ASSERT(allocator && allocation);
    14165 
    14166  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14167 
    14168 #if VMA_RECORDING_ENABLED
    14169  if(allocator->GetRecorder() != VMA_NULL)
    14170  {
    14171  allocator->GetRecorder()->RecordTouchAllocation(
    14172  allocator->GetCurrentFrameIndex(),
    14173  allocation);
    14174  }
    14175 #endif
    14176 
    14177  return allocator->TouchAllocation(allocation);
    14178 }
    14179 
    // NOTE(review): the signature line (listing 14180) is elided from this listing —
    // presumably `void vmaSetAllocationUserData(`; confirm against the full source.
    // Attaches an opaque user pointer (or string copy, depending on allocation flags)
    // to the allocation.
    14181  VmaAllocator allocator,
    14182  VmaAllocation allocation,
    14183  void* pUserData)
    14184 {
    14185  VMA_ASSERT(allocator && allocation);
    14186 
    14187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14188 
    14189  allocation->SetUserData(allocator, pUserData);
    14190 
    14191 #if VMA_RECORDING_ENABLED
    14192  if(allocator->GetRecorder() != VMA_NULL)
    14193  {
    14194  allocator->GetRecorder()->RecordSetAllocationUserData(
    14195  allocator->GetCurrentFrameIndex(),
    14196  allocation,
    14197  pUserData);
    14198  }
    14199 #endif
    14200 }
    14201 
    // NOTE(review): the signature line (listing 14202) is elided from this listing —
    // presumably `void vmaCreateLostAllocation(`; confirm against the full source.
    // Creates a dummy allocation that is already in the "lost" state.
    14203  VmaAllocator allocator,
    14204  VmaAllocation* pAllocation)
    14205 {
    14206  VMA_ASSERT(allocator && pAllocation);
    14207 
    14208  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14209 
    14210  allocator->CreateLostAllocation(pAllocation);
    14211 
    14212 #if VMA_RECORDING_ENABLED
    14213  if(allocator->GetRecorder() != VMA_NULL)
    14214  {
    14215  allocator->GetRecorder()->RecordCreateLostAllocation(
    14216  allocator->GetCurrentFrameIndex(),
    14217  *pAllocation);
    14218  }
    14219 #endif
    14220 }
    14221 
    // Maps the allocation's memory and returns the pointer in *ppData.
    // Must be balanced by vmaUnmapMemory(); mapping is reference-counted by the allocator.
    14222 VkResult vmaMapMemory(
    14223  VmaAllocator allocator,
    14224  VmaAllocation allocation,
    14225  void** ppData)
    14226 {
    14227  VMA_ASSERT(allocator && allocation && ppData);
    14228 
    14229  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14230 
    14231  VkResult res = allocator->Map(allocation, ppData);
    14232 
    14233 #if VMA_RECORDING_ENABLED
    14234  if(allocator->GetRecorder() != VMA_NULL)
    14235  {
    14236  allocator->GetRecorder()->RecordMapMemory(
    14237  allocator->GetCurrentFrameIndex(),
    14238  allocation);
    14239  }
    14240 #endif
    14241 
    14242  return res;
    14243 }
    14244 
    // Unmaps memory previously mapped with vmaMapMemory().
    14245 void vmaUnmapMemory(
    14246  VmaAllocator allocator,
    14247  VmaAllocation allocation)
    14248 {
    14249  VMA_ASSERT(allocator && allocation);
    14250 
    14251  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14252 
    14253 #if VMA_RECORDING_ENABLED
    14254  if(allocator->GetRecorder() != VMA_NULL)
    14255  {
    14256  allocator->GetRecorder()->RecordUnmapMemory(
    14257  allocator->GetCurrentFrameIndex(),
    14258  allocation);
    14259  }
    14260 #endif
    14261 
    14262  allocator->Unmap(allocation);
    14263 }
    14264 
    // Flushes the given byte range of the allocation (host writes -> device visibility,
    // needed for non-HOST_COHERENT memory).
    14265 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14266 {
    14267  VMA_ASSERT(allocator && allocation);
    14268 
    14269  VMA_DEBUG_LOG("vmaFlushAllocation");
    14270 
    14271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14272 
    14273  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14274 
    14275 #if VMA_RECORDING_ENABLED
    14276  if(allocator->GetRecorder() != VMA_NULL)
    14277  {
    14278  allocator->GetRecorder()->RecordFlushAllocation(
    14279  allocator->GetCurrentFrameIndex(),
    14280  allocation, offset, size);
    14281  }
    14282 #endif
    14283 }
    14284 
    // Invalidates the given byte range of the allocation (device writes -> host visibility,
    // needed for non-HOST_COHERENT memory). Mirror of vmaFlushAllocation.
    14285 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14286 {
    14287  VMA_ASSERT(allocator && allocation);
    14288 
    14289  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14290 
    14291  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14292 
    14293  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14294 
    14295 #if VMA_RECORDING_ENABLED
    14296  if(allocator->GetRecorder() != VMA_NULL)
    14297  {
    14298  allocator->GetRecorder()->RecordInvalidateAllocation(
    14299  allocator->GetCurrentFrameIndex(),
    14300  allocation, offset, size);
    14301  }
    14302 #endif
    14303 }
    14304 
    // Validates margin bytes of all allocations in memory types selected by memoryTypeBits.
    14305 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14306 {
    14307  VMA_ASSERT(allocator);
    14308 
    14309  VMA_DEBUG_LOG("vmaCheckCorruption");
    14310 
    14311  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14312 
    14313  return allocator->CheckCorruption(memoryTypeBits);
    14314 }
    14315 
    // Compacts memory by moving the given allocations; optional pAllocationsChanged
    // array and pDefragmentationInfo/Stats are forwarded to the allocator.
    14316 VkResult vmaDefragment(
    14317  VmaAllocator allocator,
    14318  VmaAllocation* pAllocations,
    14319  size_t allocationCount,
    14320  VkBool32* pAllocationsChanged,
    14321  const VmaDefragmentationInfo *pDefragmentationInfo,
    14322  VmaDefragmentationStats* pDefragmentationStats)
    14323 {
    14324  VMA_ASSERT(allocator && pAllocations);
    14325 
    14326  VMA_DEBUG_LOG("vmaDefragment");
    14327 
    14328  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14329 
    14330  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14331 }
    14332 
    // Binds the buffer to the allocation's memory at the allocation's offset.
    14333 VkResult vmaBindBufferMemory(
    14334  VmaAllocator allocator,
    14335  VmaAllocation allocation,
    14336  VkBuffer buffer)
    14337 {
    14338  VMA_ASSERT(allocator && allocation && buffer);
    14339 
    14340  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14341 
    14342  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14343 
    14344  return allocator->BindBufferMemory(allocation, buffer);
    14345 }
    14346 
    // Binds the image to the allocation's memory at the allocation's offset.
    14347 VkResult vmaBindImageMemory(
    14348  VmaAllocator allocator,
    14349  VmaAllocation allocation,
    14350  VkImage image)
    14351 {
    14352  VMA_ASSERT(allocator && allocation && image);
    14353 
    14354  VMA_DEBUG_LOG("vmaBindImageMemory");
    14355 
    14356  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14357 
    14358  return allocator->BindImageMemory(allocation, image);
    14359 }
    14360 
    // Creates a VkBuffer, allocates memory for it, and binds them. On any failure,
    // everything created so far is rolled back and *pBuffer/*pAllocation are left null.
    // pAllocationInfo is optional and only filled on full success.
    14361 VkResult vmaCreateBuffer(
    14362  VmaAllocator allocator,
    14363  const VkBufferCreateInfo* pBufferCreateInfo,
    14364  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14365  VkBuffer* pBuffer,
    14366  VmaAllocation* pAllocation,
    14367  VmaAllocationInfo* pAllocationInfo)
    14368 {
    14369  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14370 
    14371  if(pBufferCreateInfo->size == 0)
    14372  {
    14373  return VK_ERROR_VALIDATION_FAILED_EXT; // zero-size buffers are invalid per Vulkan spec
    14374  }
    14375 
    14376  VMA_DEBUG_LOG("vmaCreateBuffer");
    14377 
    14378  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14379 
    14380  *pBuffer = VK_NULL_HANDLE;
    14381  *pAllocation = VK_NULL_HANDLE;
    14382 
    14383  // 1. Create VkBuffer.
    14384  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14385  allocator->m_hDevice,
    14386  pBufferCreateInfo,
    14387  allocator->GetAllocationCallbacks(),
    14388  pBuffer);
    14389  if(res >= 0)
    14390  {
    14391  // 2. vkGetBufferMemoryRequirements.
    14392  VkMemoryRequirements vkMemReq = {};
    14393  bool requiresDedicatedAllocation = false;
    14394  bool prefersDedicatedAllocation = false;
    14395  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14396  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14397 
    14398  // Make sure alignment requirements for specific buffer usages reported
    14399  // in Physical Device Properties are included in alignment reported by memory requirements.
    14400  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14401  {
    14402  VMA_ASSERT(vkMemReq.alignment %
    14403  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14404  }
    14405  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14406  {
    14407  VMA_ASSERT(vkMemReq.alignment %
    14408  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14409  }
    14410  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14411  {
    14412  VMA_ASSERT(vkMemReq.alignment %
    14413  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14414  }
    14415 
    14416  // 3. Allocate memory using allocator.
    14417  res = allocator->AllocateMemory(
    14418  vkMemReq,
    14419  requiresDedicatedAllocation,
    14420  prefersDedicatedAllocation,
    14421  *pBuffer, // dedicatedBuffer
    14422  VK_NULL_HANDLE, // dedicatedImage
    14423  *pAllocationCreateInfo,
    14424  VMA_SUBALLOCATION_TYPE_BUFFER,
    14425  pAllocation);
    14426 
    14427 #if VMA_RECORDING_ENABLED
    14428  if(allocator->GetRecorder() != VMA_NULL)
    14429  {
    14430  allocator->GetRecorder()->RecordCreateBuffer(
    14431  allocator->GetCurrentFrameIndex(),
    14432  *pBufferCreateInfo,
    14433  *pAllocationCreateInfo,
    14434  *pAllocation);
    14435  }
    14436 #endif
    14437 
    14438  if(res >= 0)
    14439  {
    14440  // 4. Bind buffer with memory. (Step number fixed: previous step is 3.)
    14441  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14442  if(res >= 0)
    14443  {
    14444  // All steps succeeded.
    14445  #if VMA_STATS_STRING_ENABLED
    14446  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14447  #endif
    14448  if(pAllocationInfo != VMA_NULL)
    14449  {
    14450  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14451  }
    14452 
    14453  return VK_SUCCESS;
    14454  }
    // Bind failed: roll back allocation and buffer.
    14455  allocator->FreeMemory(*pAllocation);
    14456  *pAllocation = VK_NULL_HANDLE;
    14457  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14458  *pBuffer = VK_NULL_HANDLE;
    14459  return res;
    14460  }
    // Allocation failed: roll back buffer.
    14461  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14462  *pBuffer = VK_NULL_HANDLE;
    14463  return res;
    14464  }
    14465  return res;
    14466 }
    14467 
    // Destroys a buffer and frees its allocation. Either handle may be null;
    // both null is a no-op.
    14468 void vmaDestroyBuffer(
    14469  VmaAllocator allocator,
    14470  VkBuffer buffer,
    14471  VmaAllocation allocation)
    14472 {
    14473  VMA_ASSERT(allocator);
    14474 
    14475  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14476  {
    14477  return;
    14478  }
    14479 
    14480  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14481 
    14482  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14483 
    14484 #if VMA_RECORDING_ENABLED
    14485  if(allocator->GetRecorder() != VMA_NULL)
    14486  {
    14487  allocator->GetRecorder()->RecordDestroyBuffer(
    14488  allocator->GetCurrentFrameIndex(),
    14489  allocation);
    14490  }
    14491 #endif
    14492 
    14493  if(buffer != VK_NULL_HANDLE)
    14494  {
    14495  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14496  }
    14497 
    14498  if(allocation != VK_NULL_HANDLE)
    14499  {
    14500  allocator->FreeMemory(allocation);
    14501  }
    14502 }
    14503 
    // Creates a VkImage, allocates memory for it, and binds them. On any failure,
    // everything created so far is rolled back and *pImage/*pAllocation are left null.
    // pAllocationInfo is optional and only filled on full success.
    14504 VkResult vmaCreateImage(
    14505  VmaAllocator allocator,
    14506  const VkImageCreateInfo* pImageCreateInfo,
    14507  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14508  VkImage* pImage,
    14509  VmaAllocation* pAllocation,
    14510  VmaAllocationInfo* pAllocationInfo)
    14511 {
    14512  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14513 
    // Reject degenerate images early (zero extent, mips, or layers are invalid per spec).
    14514  if(pImageCreateInfo->extent.width == 0 ||
    14515  pImageCreateInfo->extent.height == 0 ||
    14516  pImageCreateInfo->extent.depth == 0 ||
    14517  pImageCreateInfo->mipLevels == 0 ||
    14518  pImageCreateInfo->arrayLayers == 0)
    14519  {
    14520  return VK_ERROR_VALIDATION_FAILED_EXT;
    14521  }
    14522 
    14523  VMA_DEBUG_LOG("vmaCreateImage");
    14524 
    14525  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14526 
    14527  *pImage = VK_NULL_HANDLE;
    14528  *pAllocation = VK_NULL_HANDLE;
    14529 
    14530  // 1. Create VkImage.
    14531  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14532  allocator->m_hDevice,
    14533  pImageCreateInfo,
    14534  allocator->GetAllocationCallbacks(),
    14535  pImage);
    14536  if(res >= 0)
    14537  {
    // Tiling determines the suballocation type so linear and optimal images
    // are kept apart (bufferImageGranularity).
    14538  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14539  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14540  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14541 
    14542  // 2. Allocate memory using allocator.
    14543  VkMemoryRequirements vkMemReq = {};
    14544  bool requiresDedicatedAllocation = false;
    14545  bool prefersDedicatedAllocation = false;
    14546  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14547  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14548 
    14549  res = allocator->AllocateMemory(
    14550  vkMemReq,
    14551  requiresDedicatedAllocation,
    14552  prefersDedicatedAllocation,
    14553  VK_NULL_HANDLE, // dedicatedBuffer
    14554  *pImage, // dedicatedImage
    14555  *pAllocationCreateInfo,
    14556  suballocType,
    14557  pAllocation);
    14558 
    14559 #if VMA_RECORDING_ENABLED
    14560  if(allocator->GetRecorder() != VMA_NULL)
    14561  {
    14562  allocator->GetRecorder()->RecordCreateImage(
    14563  allocator->GetCurrentFrameIndex(),
    14564  *pImageCreateInfo,
    14565  *pAllocationCreateInfo,
    14566  *pAllocation);
    14567  }
    14568 #endif
    14569 
    14570  if(res >= 0)
    14571  {
    14572  // 3. Bind image with memory.
    14573  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14574  if(res >= 0)
    14575  {
    14576  // All steps succeeded.
    14577  #if VMA_STATS_STRING_ENABLED
    14578  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14579  #endif
    14580  if(pAllocationInfo != VMA_NULL)
    14581  {
    14582  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14583  }
    14584 
    14585  return VK_SUCCESS;
    14586  }
    // Bind failed: roll back allocation and image.
    14587  allocator->FreeMemory(*pAllocation);
    14588  *pAllocation = VK_NULL_HANDLE;
    14589  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14590  *pImage = VK_NULL_HANDLE;
    14591  return res;
    14592  }
    // Allocation failed: roll back image.
    14593  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14594  *pImage = VK_NULL_HANDLE;
    14595  return res;
    14596  }
    14597  return res;
    14598 }
    14599 
    // Destroys an image and frees its allocation. Either handle may be null;
    // both null is a no-op. Mirror of vmaDestroyBuffer.
    14600 void vmaDestroyImage(
    14601  VmaAllocator allocator,
    14602  VkImage image,
    14603  VmaAllocation allocation)
    14604 {
    14605  VMA_ASSERT(allocator);
    14606 
    14607  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14608  {
    14609  return;
    14610  }
    14611 
    14612  VMA_DEBUG_LOG("vmaDestroyImage");
    14613 
    14614  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14615 
    14616 #if VMA_RECORDING_ENABLED
    14617  if(allocator->GetRecorder() != VMA_NULL)
    14618  {
    14619  allocator->GetRecorder()->RecordDestroyImage(
    14620  allocator->GetCurrentFrameIndex(),
    14621  allocation);
    14622  }
    14623 #endif
    14624 
    14625  if(image != VK_NULL_HANDLE)
    14626  {
    14627  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14628  }
    14629  if(allocation != VK_NULL_HANDLE)
    14630  {
    14631  allocator->FreeMemory(allocation);
    14632  }
    14633 }
    14634 
    14635 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1586
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1887
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    -
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1641
    +
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1643
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    -
    Definition: vk_mem_alloc.h:1615
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2207
    -
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1596
    +
    Definition: vk_mem_alloc.h:1617
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2209
    +
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1598
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:1842
    -
    Definition: vk_mem_alloc.h:1945
    -
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1588
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2307
    -
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1638
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2577
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2096
    -
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1485
    +
    Definition: vk_mem_alloc.h:1844
    +
    Definition: vk_mem_alloc.h:1947
    +
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1590
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2309
    +
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1640
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2579
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2098
    +
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1487
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2188
    -
    Definition: vk_mem_alloc.h:1922
    -
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1577
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1995
    -
    Definition: vk_mem_alloc.h:1869
    -
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1650
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2124
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2190
    +
    Definition: vk_mem_alloc.h:1924
    +
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1579
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1997
    +
    Definition: vk_mem_alloc.h:1871
    +
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1652
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2126
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1703
    -
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1635
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1705
    +
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1637
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1873
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1875
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1775
    -
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1593
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1774
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2581
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1777
    +
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1595
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1776
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2583
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1667
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1784
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2589
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1979
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2572
    -
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1594
    -
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1519
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1669
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1786
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2591
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1981
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2574
    +
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1596
    +
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1521
    Represents main object of this library initialized.
    -
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1644
    +
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1646
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2138
    -
    Definition: vk_mem_alloc.h:2132
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1710
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2317
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2140
    +
    Definition: vk_mem_alloc.h:2134
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1712
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2319
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    -
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1589
    -
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1613
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2016
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2158
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2194
    +
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1591
    +
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1615
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2018
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2160
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2196
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    -
    Definition: vk_mem_alloc.h:1575
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2141
    +
    Definition: vk_mem_alloc.h:1577
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2143
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1820
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1822
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2567
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2569
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2585
    -
    Definition: vk_mem_alloc.h:1859
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2003
    -
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1592
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2587
    +
    Definition: vk_mem_alloc.h:1861
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2005
    +
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1594
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1780
    -
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1525
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1782
    +
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1527
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    - +
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    -
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1546
    +
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1548
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    -
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1617
    -
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1551
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2587
    +
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1619
    +
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1553
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2589
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1990
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2204
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1992
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2206
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    -
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1585
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1763
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2153
    -
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1538
    -
    Definition: vk_mem_alloc.h:2128
    +
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1587
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1765
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2155
    +
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1540
    +
    Definition: vk_mem_alloc.h:2130
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1929
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1776
    -
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1542
    -
    Definition: vk_mem_alloc.h:1953
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2144
    -
    Definition: vk_mem_alloc.h:1868
    -
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1591
    +
    Definition: vk_mem_alloc.h:1931
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1778
    +
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1544
    +
    Definition: vk_mem_alloc.h:1955
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2146
    +
    Definition: vk_mem_alloc.h:1870
    +
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1593
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1985
    -
    Definition: vk_mem_alloc.h:1976
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1987
    +
    Definition: vk_mem_alloc.h:1978
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1766
    -
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1587
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2166
    -
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1653
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2197
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1974
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2009
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1768
    +
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1589
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2168
    +
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1655
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2199
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1976
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2011
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1691
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1782
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1909
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1775
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1693
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1784
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1911
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1777
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
    -
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1598
    -
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1623
    -
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1540
    -
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1597
    +
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1600
    +
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1625
    +
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1542
    +
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1599
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2180
    -
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1590
    -
    Definition: vk_mem_alloc.h:1940
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2182
    +
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1592
    +
    Definition: vk_mem_alloc.h:1942
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    -
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1631
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2331
    -
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1647
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1775
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1772
    +
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1633
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2333
    +
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1649
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1777
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1774
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2185
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2187
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions...
    -
    Definition: vk_mem_alloc.h:1949
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2312
    -
    Definition: vk_mem_alloc.h:1960
    -
    Definition: vk_mem_alloc.h:1972
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2583
    -
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1583
    +
    Definition: vk_mem_alloc.h:1951
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2314
    +
    Definition: vk_mem_alloc.h:1962
    +
    Definition: vk_mem_alloc.h:1974
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2585
    +
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1585
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1770
    -
    Definition: vk_mem_alloc.h:1825
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2134
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1772
    +
    Definition: vk_mem_alloc.h:1827
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2136
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    -
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1620
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1768
    -
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1595
    -
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1599
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1896
    -
    Definition: vk_mem_alloc.h:1967
    -
    Definition: vk_mem_alloc.h:1852
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2326
    +
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1622
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1770
    +
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1597
    +
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1601
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1898
    +
    Definition: vk_mem_alloc.h:1969
    +
    Definition: vk_mem_alloc.h:1854
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2328
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    -
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1573
    +
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1575
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    -
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1586
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2113
    +
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1588
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2115
    VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
    Tries to resize an allocation in place, if there is enough free memory after it.
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2293
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2295
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1957
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2078
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1776
    +
    Definition: vk_mem_alloc.h:1959
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2080
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1778
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
    - -
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1607
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1783
    + +
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1609
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1785
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2191
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1776
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2193
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1778
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2298
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2300
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1479 /*
    1480 Define this macro to 0/1 to disable/enable support for recording functionality,
    1481 available through VmaAllocatorCreateInfo::pRecordSettings.
    1482 */
    1483 #ifndef VMA_RECORDING_ENABLED
    1484  #ifdef _WIN32
    1485  #define VMA_RECORDING_ENABLED 1
    1486  #else
    1487  #define VMA_RECORDING_ENABLED 0
    1488  #endif
    1489 #endif
    1490 
    1491 #ifndef NOMINMAX
    1492  #define NOMINMAX // For windows.h
    1493 #endif
    1494 
    1495 #include <vulkan/vulkan.h>
    1496 
    1497 #if VMA_RECORDING_ENABLED
    1498  #include <windows.h>
    1499 #endif
    1500 
    1501 #if !defined(VMA_DEDICATED_ALLOCATION)
    1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1503  #define VMA_DEDICATED_ALLOCATION 1
    1504  #else
    1505  #define VMA_DEDICATED_ALLOCATION 0
    1506  #endif
    1507 #endif
    1508 
    1518 VK_DEFINE_HANDLE(VmaAllocator)
    1519 
    1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1522  VmaAllocator allocator,
    1523  uint32_t memoryType,
    1524  VkDeviceMemory memory,
    1525  VkDeviceSize size);
    1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1528  VmaAllocator allocator,
    1529  uint32_t memoryType,
    1530  VkDeviceMemory memory,
    1531  VkDeviceSize size);
    1532 
    1546 
    1576 
    1579 typedef VkFlags VmaAllocatorCreateFlags;
    1580 
    1585 typedef struct VmaVulkanFunctions {
    1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1588  PFN_vkAllocateMemory vkAllocateMemory;
    1589  PFN_vkFreeMemory vkFreeMemory;
    1590  PFN_vkMapMemory vkMapMemory;
    1591  PFN_vkUnmapMemory vkUnmapMemory;
    1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1594  PFN_vkBindBufferMemory vkBindBufferMemory;
    1595  PFN_vkBindImageMemory vkBindImageMemory;
    1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1598  PFN_vkCreateBuffer vkCreateBuffer;
    1599  PFN_vkDestroyBuffer vkDestroyBuffer;
    1600  PFN_vkCreateImage vkCreateImage;
    1601  PFN_vkDestroyImage vkDestroyImage;
    1602 #if VMA_DEDICATED_ALLOCATION
    1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1605 #endif
    1607 
    1609 typedef enum VmaRecordFlagBits {
    1616 
    1619 typedef VkFlags VmaRecordFlags;
    1620 
    1622 typedef struct VmaRecordSettings
    1623 {
    1633  const char* pFilePath;
    1635 
    1638 {
    1642 
    1643  VkPhysicalDevice physicalDevice;
    1645 
    1646  VkDevice device;
    1648 
    1651 
    1652  const VkAllocationCallbacks* pAllocationCallbacks;
    1654 
    1693  const VkDeviceSize* pHeapSizeLimit;
    1714 
    1716 VkResult vmaCreateAllocator(
    1717  const VmaAllocatorCreateInfo* pCreateInfo,
    1718  VmaAllocator* pAllocator);
    1719 
    1721 void vmaDestroyAllocator(
    1722  VmaAllocator allocator);
    1723 
    1729  VmaAllocator allocator,
    1730  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1731 
    1737  VmaAllocator allocator,
    1738  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1739 
    1747  VmaAllocator allocator,
    1748  uint32_t memoryTypeIndex,
    1749  VkMemoryPropertyFlags* pFlags);
    1750 
    1760  VmaAllocator allocator,
    1761  uint32_t frameIndex);
    1762 
    1765 typedef struct VmaStatInfo
    1766 {
    1768  uint32_t blockCount;
    1774  VkDeviceSize usedBytes;
    1776  VkDeviceSize unusedBytes;
    1779 } VmaStatInfo;
    1780 
    1782 typedef struct VmaStats
    1783 {
    1784  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1785  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1787 } VmaStats;
    1788 
    1790 void vmaCalculateStats(
    1791  VmaAllocator allocator,
    1792  VmaStats* pStats);
    1793 
    1794 #define VMA_STATS_STRING_ENABLED 1
    1795 
    1796 #if VMA_STATS_STRING_ENABLED
    1797 
    1799 
    1801 void vmaBuildStatsString(
    1802  VmaAllocator allocator,
    1803  char** ppStatsString,
    1804  VkBool32 detailedMap);
    1805 
    1806 void vmaFreeStatsString(
    1807  VmaAllocator allocator,
    1808  char* pStatsString);
    1809 
    1810 #endif // #if VMA_STATS_STRING_ENABLED
    1811 
    1820 VK_DEFINE_HANDLE(VmaPool)
    1821 
    1822 typedef enum VmaMemoryUsage
    1823 {
    1872 } VmaMemoryUsage;
    1873 
    1888 
    1943 
    1956 
    1966 
    1973 
    1977 
    1979 {
    1992  VkMemoryPropertyFlags requiredFlags;
    1997  VkMemoryPropertyFlags preferredFlags;
    2005  uint32_t memoryTypeBits;
    2018  void* pUserData;
    2020 
    2037 VkResult vmaFindMemoryTypeIndex(
    2038  VmaAllocator allocator,
    2039  uint32_t memoryTypeBits,
    2040  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2041  uint32_t* pMemoryTypeIndex);
    2042 
    2056  VmaAllocator allocator,
    2057  const VkBufferCreateInfo* pBufferCreateInfo,
    2058  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2059  uint32_t* pMemoryTypeIndex);
    2060 
    2074  VmaAllocator allocator,
    2075  const VkImageCreateInfo* pImageCreateInfo,
    2076  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2077  uint32_t* pMemoryTypeIndex);
    2078 
    2099 
    2116 
    2127 
    2133 
    2136 typedef VkFlags VmaPoolCreateFlags;
    2137 
    2140 typedef struct VmaPoolCreateInfo {
    2155  VkDeviceSize blockSize;
    2184 
    2187 typedef struct VmaPoolStats {
    2190  VkDeviceSize size;
    2193  VkDeviceSize unusedSize;
    2206  VkDeviceSize unusedRangeSizeMax;
    2209  size_t blockCount;
    2210 } VmaPoolStats;
    2211 
    2218 VkResult vmaCreatePool(
    2219  VmaAllocator allocator,
    2220  const VmaPoolCreateInfo* pCreateInfo,
    2221  VmaPool* pPool);
    2222 
    2225 void vmaDestroyPool(
    2226  VmaAllocator allocator,
    2227  VmaPool pool);
    2228 
    2235 void vmaGetPoolStats(
    2236  VmaAllocator allocator,
    2237  VmaPool pool,
    2238  VmaPoolStats* pPoolStats);
    2239 
    2247  VmaAllocator allocator,
    2248  VmaPool pool,
    2249  size_t* pLostAllocationCount);
    2250 
    2265 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2266 
    2291 VK_DEFINE_HANDLE(VmaAllocation)
    2292 
    2293 
    2295 typedef struct VmaAllocationInfo {
    2300  uint32_t memoryType;
    2309  VkDeviceMemory deviceMemory;
    2314  VkDeviceSize offset;
    2319  VkDeviceSize size;
    2333  void* pUserData;
    2335 
    2346 VkResult vmaAllocateMemory(
    2347  VmaAllocator allocator,
    2348  const VkMemoryRequirements* pVkMemoryRequirements,
    2349  const VmaAllocationCreateInfo* pCreateInfo,
    2350  VmaAllocation* pAllocation,
    2351  VmaAllocationInfo* pAllocationInfo);
    2352 
    2360  VmaAllocator allocator,
    2361  VkBuffer buffer,
    2362  const VmaAllocationCreateInfo* pCreateInfo,
    2363  VmaAllocation* pAllocation,
    2364  VmaAllocationInfo* pAllocationInfo);
    2365 
    2367 VkResult vmaAllocateMemoryForImage(
    2368  VmaAllocator allocator,
    2369  VkImage image,
    2370  const VmaAllocationCreateInfo* pCreateInfo,
    2371  VmaAllocation* pAllocation,
    2372  VmaAllocationInfo* pAllocationInfo);
    2373 
    2375 void vmaFreeMemory(
    2376  VmaAllocator allocator,
    2377  VmaAllocation allocation);
    2378 
    2399 VkResult vmaResizeAllocation(
    2400  VmaAllocator allocator,
    2401  VmaAllocation allocation,
    2402  VkDeviceSize newSize);
    2403 
    2421  VmaAllocator allocator,
    2422  VmaAllocation allocation,
    2423  VmaAllocationInfo* pAllocationInfo);
    2424 
    2439 VkBool32 vmaTouchAllocation(
    2440  VmaAllocator allocator,
    2441  VmaAllocation allocation);
    2442 
    2457  VmaAllocator allocator,
    2458  VmaAllocation allocation,
    2459  void* pUserData);
    2460 
    2472  VmaAllocator allocator,
    2473  VmaAllocation* pAllocation);
    2474 
    2509 VkResult vmaMapMemory(
    2510  VmaAllocator allocator,
    2511  VmaAllocation allocation,
    2512  void** ppData);
    2513 
    2518 void vmaUnmapMemory(
    2519  VmaAllocator allocator,
    2520  VmaAllocation allocation);
    2521 
    2534 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2535 
    2548 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2549 
    2566 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2567 
    2569 typedef struct VmaDefragmentationInfo {
    2574  VkDeviceSize maxBytesToMove;
    2581 
    2583 typedef struct VmaDefragmentationStats {
    2585  VkDeviceSize bytesMoved;
    2587  VkDeviceSize bytesFreed;
    2593 
    2632 VkResult vmaDefragment(
    2633  VmaAllocator allocator,
    2634  VmaAllocation* pAllocations,
    2635  size_t allocationCount,
    2636  VkBool32* pAllocationsChanged,
    2637  const VmaDefragmentationInfo *pDefragmentationInfo,
    2638  VmaDefragmentationStats* pDefragmentationStats);
    2639 
    2652 VkResult vmaBindBufferMemory(
    2653  VmaAllocator allocator,
    2654  VmaAllocation allocation,
    2655  VkBuffer buffer);
    2656 
    2669 VkResult vmaBindImageMemory(
    2670  VmaAllocator allocator,
    2671  VmaAllocation allocation,
    2672  VkImage image);
    2673 
    2700 VkResult vmaCreateBuffer(
    2701  VmaAllocator allocator,
    2702  const VkBufferCreateInfo* pBufferCreateInfo,
    2703  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2704  VkBuffer* pBuffer,
    2705  VmaAllocation* pAllocation,
    2706  VmaAllocationInfo* pAllocationInfo);
    2707 
    2719 void vmaDestroyBuffer(
    2720  VmaAllocator allocator,
    2721  VkBuffer buffer,
    2722  VmaAllocation allocation);
    2723 
    2725 VkResult vmaCreateImage(
    2726  VmaAllocator allocator,
    2727  const VkImageCreateInfo* pImageCreateInfo,
    2728  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2729  VkImage* pImage,
    2730  VmaAllocation* pAllocation,
    2731  VmaAllocationInfo* pAllocationInfo);
    2732 
    2744 void vmaDestroyImage(
    2745  VmaAllocator allocator,
    2746  VkImage image,
    2747  VmaAllocation allocation);
    2748 
    2749 #ifdef __cplusplus
    2750 }
    2751 #endif
    2752 
    2753 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2754 
    2755 // For Visual Studio IntelliSense.
    2756 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2757 #define VMA_IMPLEMENTATION
    2758 #endif
    2759 
    2760 #ifdef VMA_IMPLEMENTATION
    2761 #undef VMA_IMPLEMENTATION
    2762 
    2763 #include <cstdint>
    2764 #include <cstdlib>
    2765 #include <cstring>
    2766 
    2767 /*******************************************************************************
    2768 CONFIGURATION SECTION
    2769 
    2770 Define some of these macros before each #include of this header or change them
    2771 here if you need other then default behavior depending on your environment.
    2772 */
    2773 
    2774 /*
    2775 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2776 internally, like:
    2777 
    2778  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2779 
    2780 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2781 VmaAllocatorCreateInfo::pVulkanFunctions.
    2782 */
    2783 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2784 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2785 #endif
    2786 
    2787 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2788 //#define VMA_USE_STL_CONTAINERS 1
    2789 
    2790 /* Set this macro to 1 to make the library including and using STL containers:
    2791 std::pair, std::vector, std::list, std::unordered_map.
    2792 
    2793 Set it to 0 or undefined to make the library using its own implementation of
    2794 the containers.
    2795 */
    2796 #if VMA_USE_STL_CONTAINERS
    2797  #define VMA_USE_STL_VECTOR 1
    2798  #define VMA_USE_STL_UNORDERED_MAP 1
    2799  #define VMA_USE_STL_LIST 1
    2800 #endif
    2801 
    2802 #if VMA_USE_STL_VECTOR
    2803  #include <vector>
    2804 #endif
    2805 
    2806 #if VMA_USE_STL_UNORDERED_MAP
    2807  #include <unordered_map>
    2808 #endif
    2809 
    2810 #if VMA_USE_STL_LIST
    2811  #include <list>
    2812 #endif
    2813 
    2814 /*
    2815 Following headers are used in this CONFIGURATION section only, so feel free to
    2816 remove them if not needed.
    2817 */
    2818 #include <cassert> // for assert
    2819 #include <algorithm> // for min, max
    2820 #include <mutex> // for std::mutex
    2821 #include <atomic> // for std::atomic
    2822 
    2823 #ifndef VMA_NULL
    2824  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2825  #define VMA_NULL nullptr
    2826 #endif
    2827 
    2828 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2829 #include <cstdlib>
    2830 void *aligned_alloc(size_t alignment, size_t size)
    2831 {
    2832  // alignment must be >= sizeof(void*)
    2833  if(alignment < sizeof(void*))
    2834  {
    2835  alignment = sizeof(void*);
    2836  }
    2837 
    2838  return memalign(alignment, size);
    2839 }
    2840 #elif defined(__APPLE__) || defined(__ANDROID__)
    2841 #include <cstdlib>
    2842 void *aligned_alloc(size_t alignment, size_t size)
    2843 {
    2844  // alignment must be >= sizeof(void*)
    2845  if(alignment < sizeof(void*))
    2846  {
    2847  alignment = sizeof(void*);
    2848  }
    2849 
    2850  void *pointer;
    2851  if(posix_memalign(&pointer, alignment, size) == 0)
    2852  return pointer;
    2853  return VMA_NULL;
    2854 }
    2855 #endif
    2856 
    2857 // If your compiler is not compatible with C++11 and definition of
    2858 // aligned_alloc() function is missing, uncommeting following line may help:
    2859 
    2860 //#include <malloc.h>
    2861 
    2862 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2863 #ifndef VMA_ASSERT
    2864  #ifdef _DEBUG
    2865  #define VMA_ASSERT(expr) assert(expr)
    2866  #else
    2867  #define VMA_ASSERT(expr)
    2868  #endif
    2869 #endif
    2870 
    2871 // Assert that will be called very often, like inside data structures e.g. operator[].
    2872 // Making it non-empty can make program slow.
    2873 #ifndef VMA_HEAVY_ASSERT
    2874  #ifdef _DEBUG
    2875  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2876  #else
    2877  #define VMA_HEAVY_ASSERT(expr)
    2878  #endif
    2879 #endif
    2880 
    2881 #ifndef VMA_ALIGN_OF
    2882  #define VMA_ALIGN_OF(type) (__alignof(type))
    2883 #endif
    2884 
    2885 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2886  #if defined(_WIN32)
    2887  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2888  #else
    2889  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2890  #endif
    2891 #endif
    2892 
    2893 #ifndef VMA_SYSTEM_FREE
    2894  #if defined(_WIN32)
    2895  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2896  #else
    2897  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2898  #endif
    2899 #endif
    2900 
    2901 #ifndef VMA_MIN
    2902  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2903 #endif
    2904 
    2905 #ifndef VMA_MAX
    2906  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2907 #endif
    2908 
    2909 #ifndef VMA_SWAP
    2910  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2911 #endif
    2912 
    2913 #ifndef VMA_SORT
    2914  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2915 #endif
    2916 
    2917 #ifndef VMA_DEBUG_LOG
    2918  #define VMA_DEBUG_LOG(format, ...)
    2919  /*
    2920  #define VMA_DEBUG_LOG(format, ...) do { \
    2921  printf(format, __VA_ARGS__); \
    2922  printf("\n"); \
    2923  } while(false)
    2924  */
    2925 #endif
    2926 
    2927 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2928 #if VMA_STATS_STRING_ENABLED
    2929  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2930  {
    2931  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2932  }
    2933  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2934  {
    2935  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2936  }
    2937  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2938  {
    2939  snprintf(outStr, strLen, "%p", ptr);
    2940  }
    2941 #endif
    2942 
    2943 #ifndef VMA_MUTEX
    2944  class VmaMutex
    2945  {
    2946  public:
    2947  VmaMutex() { }
    2948  ~VmaMutex() { }
    2949  void Lock() { m_Mutex.lock(); }
    2950  void Unlock() { m_Mutex.unlock(); }
    2951  private:
    2952  std::mutex m_Mutex;
    2953  };
    2954  #define VMA_MUTEX VmaMutex
    2955 #endif
    2956 
    2957 /*
    2958 If providing your own implementation, you need to implement a subset of std::atomic:
    2959 
    2960 - Constructor(uint32_t desired)
    2961 - uint32_t load() const
    2962 - void store(uint32_t desired)
    2963 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2964 */
    2965 #ifndef VMA_ATOMIC_UINT32
    2966  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2967 #endif
    2968 
    2969 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2970 
    2974  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2975 #endif
    2976 
    2977 #ifndef VMA_DEBUG_ALIGNMENT
    2978 
    2982  #define VMA_DEBUG_ALIGNMENT (1)
    2983 #endif
    2984 
    2985 #ifndef VMA_DEBUG_MARGIN
    2986 
    2990  #define VMA_DEBUG_MARGIN (0)
    2991 #endif
    2992 
    2993 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2994 
    2998  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    2999 #endif
    3000 
    3001 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3002 
    3007  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3008 #endif
    3009 
    3010 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3011 
    3015  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3016 #endif
    3017 
    3018 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3019 
    3023  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3024 #endif
    3025 
    3026 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3027  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3029 #endif
    3030 
    3031 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3032  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3034 #endif
    3035 
    3036 #ifndef VMA_CLASS_NO_COPY
    3037  #define VMA_CLASS_NO_COPY(className) \
    3038  private: \
    3039  className(const className&) = delete; \
    3040  className& operator=(const className&) = delete;
    3041 #endif
    3042 
    3043 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3044 
    3045 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3046 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3047 
    3048 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3049 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3050 
    3051 /*******************************************************************************
    3052 END OF CONFIGURATION
    3053 */
    3054 
    3055 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3056  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3057 
    3058 // Returns number of bits set to 1 in (v).
    3059 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3060 {
    3061  uint32_t c = v - ((v >> 1) & 0x55555555);
    3062  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3063  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3064  c = ((c >> 8) + c) & 0x00FF00FF;
    3065  c = ((c >> 16) + c) & 0x0000FFFF;
    3066  return c;
    3067 }
    3068 
    3069 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3070 // Use types like uint32_t, uint64_t as T.
    3071 template <typename T>
    3072 static inline T VmaAlignUp(T val, T align)
    3073 {
    3074  return (val + align - 1) / align * align;
    3075 }
    3076 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3077 // Use types like uint32_t, uint64_t as T.
    3078 template <typename T>
    3079 static inline T VmaAlignDown(T val, T align)
    3080 {
    3081  return val / align * align;
    3082 }
    3083 
    3084 // Division with mathematical rounding to nearest number.
    3085 template <typename T>
    3086 static inline T VmaRoundDiv(T x, T y)
    3087 {
    3088  return (x + (y / (T)2)) / y;
    3089 }
    3090 
    3091 /*
    3092 Returns true if given number is a power of two.
    3093 T must be unsigned integer number or signed integer but always nonnegative.
    3094 For 0 returns true.
    3095 */
    3096 template <typename T>
    3097 inline bool VmaIsPow2(T x)
    3098 {
    3099  return (x & (x-1)) == 0;
    3100 }
    3101 
    3102 // Returns smallest power of 2 greater or equal to v.
    3103 static inline uint32_t VmaNextPow2(uint32_t v)
    3104 {
    3105  v--;
    3106  v |= v >> 1;
    3107  v |= v >> 2;
    3108  v |= v >> 4;
    3109  v |= v >> 8;
    3110  v |= v >> 16;
    3111  v++;
    3112  return v;
    3113 }
    3114 static inline uint64_t VmaNextPow2(uint64_t v)
    3115 {
    3116  v--;
    3117  v |= v >> 1;
    3118  v |= v >> 2;
    3119  v |= v >> 4;
    3120  v |= v >> 8;
    3121  v |= v >> 16;
    3122  v |= v >> 32;
    3123  v++;
    3124  return v;
    3125 }
    3126 
    3127 // Returns largest power of 2 less or equal to v.
    3128 static inline uint32_t VmaPrevPow2(uint32_t v)
    3129 {
    3130  v |= v >> 1;
    3131  v |= v >> 2;
    3132  v |= v >> 4;
    3133  v |= v >> 8;
    3134  v |= v >> 16;
    3135  v = v ^ (v >> 1);
    3136  return v;
    3137 }
    3138 static inline uint64_t VmaPrevPow2(uint64_t v)
    3139 {
    3140  v |= v >> 1;
    3141  v |= v >> 2;
    3142  v |= v >> 4;
    3143  v |= v >> 8;
    3144  v |= v >> 16;
    3145  v |= v >> 32;
    3146  v = v ^ (v >> 1);
    3147  return v;
    3148 }
    3149 
    3150 static inline bool VmaStrIsEmpty(const char* pStr)
    3151 {
    3152  return pStr == VMA_NULL || *pStr == '\0';
    3153 }
    3154 
    3155 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3156 {
    3157  switch(algorithm)
    3158  {
    3160  return "Linear";
    3162  return "Buddy";
    3163  case 0:
    3164  return "Default";
    3165  default:
    3166  VMA_ASSERT(0);
    3167  return "";
    3168  }
    3169 }
    3170 
    3171 #ifndef VMA_SORT
    3172 
    3173 template<typename Iterator, typename Compare>
    3174 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3175 {
    3176  Iterator centerValue = end; --centerValue;
    3177  Iterator insertIndex = beg;
    3178  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3179  {
    3180  if(cmp(*memTypeIndex, *centerValue))
    3181  {
    3182  if(insertIndex != memTypeIndex)
    3183  {
    3184  VMA_SWAP(*memTypeIndex, *insertIndex);
    3185  }
    3186  ++insertIndex;
    3187  }
    3188  }
    3189  if(insertIndex != centerValue)
    3190  {
    3191  VMA_SWAP(*insertIndex, *centerValue);
    3192  }
    3193  return insertIndex;
    3194 }
    3195 
    3196 template<typename Iterator, typename Compare>
    3197 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3198 {
    3199  if(beg < end)
    3200  {
    3201  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3202  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3203  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3204  }
    3205 }
    3206 
    3207 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3208 
    3209 #endif // #ifndef VMA_SORT
    3210 
    3211 /*
    3212 Returns true if two memory blocks occupy overlapping pages.
    3213 ResourceA must be in less memory offset than ResourceB.
    3214 
    3215 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3216 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3217 */
    3218 static inline bool VmaBlocksOnSamePage(
    3219  VkDeviceSize resourceAOffset,
    3220  VkDeviceSize resourceASize,
    3221  VkDeviceSize resourceBOffset,
    3222  VkDeviceSize pageSize)
    3223 {
    3224  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3225  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3226  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3227  VkDeviceSize resourceBStart = resourceBOffset;
    3228  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3229  return resourceAEndPage == resourceBStartPage;
    3230 }
    3231 
    3232 enum VmaSuballocationType
    3233 {
    3234  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3235  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3236  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3237  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3238  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3239  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3240  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3241 };
    3242 
    3243 /*
    3244 Returns true if given suballocation types could conflict and must respect
    3245 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3246 or linear image and another one is optimal image. If type is unknown, behave
    3247 conservatively.
    3248 */
    3249 static inline bool VmaIsBufferImageGranularityConflict(
    3250  VmaSuballocationType suballocType1,
    3251  VmaSuballocationType suballocType2)
    3252 {
    3253  if(suballocType1 > suballocType2)
    3254  {
    3255  VMA_SWAP(suballocType1, suballocType2);
    3256  }
    3257 
    3258  switch(suballocType1)
    3259  {
    3260  case VMA_SUBALLOCATION_TYPE_FREE:
    3261  return false;
    3262  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3263  return true;
    3264  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3265  return
    3266  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3267  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3268  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3269  return
    3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3271  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3272  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3273  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3274  return
    3275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3276  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3277  return false;
    3278  default:
    3279  VMA_ASSERT(0);
    3280  return true;
    3281  }
    3282 }
    3283 
    3284 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3285 {
    3286  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3287  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3288  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3289  {
    3290  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3291  }
    3292 }
    3293 
    3294 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3295 {
    3296  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3297  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3298  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3299  {
    3300  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3301  {
    3302  return false;
    3303  }
    3304  }
    3305  return true;
    3306 }
    3307 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // Locks `mutex` only when useMutex is true; otherwise the guard is a no-op.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    // The locked mutex, or VMA_NULL when locking was disabled at construction.
    VMA_MUTEX* m_pMutex;
};
    3333 
    3334 #if VMA_DEBUG_GLOBAL_MUTEX
    3335  static VMA_MUTEX gDebugGlobalMutex;
    3336  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3337 #else
    3338  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3339 #endif
    3340 
    3341 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3342 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3343 
    3344 /*
    3345 Performs binary search and returns iterator to first element that is greater or
    3346 equal to (key), according to comparison (cmp).
    3347 
    3348 Cmp should return true if first argument is less than second argument.
    3349 
    3350 Returned value is the found element, if present in the collection or place where
    3351 new element with value (key) should be inserted.
    3352 */
    3353 template <typename CmpLess, typename IterT, typename KeyT>
    3354 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3355 {
    3356  size_t down = 0, up = (end - beg);
    3357  while(down < up)
    3358  {
    3359  const size_t mid = (down + up) / 2;
    3360  if(cmp(*(beg+mid), key))
    3361  {
    3362  down = mid + 1;
    3363  }
    3364  else
    3365  {
    3366  up = mid;
    3367  }
    3368  }
    3369  return beg + down;
    3370 }
    3371 
    3373 // Memory allocation
    3374 
    3375 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3376 {
    3377  if((pAllocationCallbacks != VMA_NULL) &&
    3378  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3379  {
    3380  return (*pAllocationCallbacks->pfnAllocation)(
    3381  pAllocationCallbacks->pUserData,
    3382  size,
    3383  alignment,
    3384  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3385  }
    3386  else
    3387  {
    3388  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3389  }
    3390 }
    3391 
    3392 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3393 {
    3394  if((pAllocationCallbacks != VMA_NULL) &&
    3395  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3396  {
    3397  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3398  }
    3399  else
    3400  {
    3401  VMA_SYSTEM_FREE(ptr);
    3402  }
    3403 }
    3404 
// Allocates raw, properly aligned storage for one object of type T.
// Does NOT run T's constructor - pair with vma_new for construction.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3410 
// Allocates raw, properly aligned storage for `count` objects of type T.
// Does NOT run constructors - pair with vma_new_array for construction.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3416 
// Placement-new construction of a single object in storage from VmaAllocate.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Placement-new construction over storage from VmaAllocateArray.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3420 
// Destroys one object created with vma_new and frees its storage.
// ptr must not be null.
template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}
    3427 
    3428 template<typename T>
    3429 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3430 {
    3431  if(ptr != VMA_NULL)
    3432  {
    3433  for(size_t i = count; i--; )
    3434  {
    3435  ptr[i].~T();
    3436  }
    3437  VmaFree(pAllocationCallbacks, ptr);
    3438  }
    3439 }
    3440 
// STL-compatible allocator.
// Minimal std::allocator-compatible adapter that routes all (de)allocations
// through VkAllocationCallbacks (or the system fallback) via
// VmaAllocateArray / VmaFree.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by allocator_traits.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators are interchangeable iff they use the same callbacks.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3468 
    3469 #if VMA_USE_STL_VECTOR
    3470 
    3471 #define VmaVector std::vector
    3472 
// Inserts `item` into the std::vector at position `index`.
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
    3478 
// Removes the element at position `index` from the std::vector.
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
    3484 
    3485 #else // #if VMA_USE_STL_VECTOR
    3486 
    3487 /* Class with interface compatible with subset of std::vector.
    3488 T must be POD because constructors and destructors are not called and memcpy is
    3489 used for these objects. */
    3490 template<typename T, typename AllocatorT>
    3491 class VmaVector
    3492 {
    3493 public:
    3494  typedef T value_type;
    3495 
    3496  VmaVector(const AllocatorT& allocator) :
    3497  m_Allocator(allocator),
    3498  m_pArray(VMA_NULL),
    3499  m_Count(0),
    3500  m_Capacity(0)
    3501  {
    3502  }
    3503 
    3504  VmaVector(size_t count, const AllocatorT& allocator) :
    3505  m_Allocator(allocator),
    3506  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3507  m_Count(count),
    3508  m_Capacity(count)
    3509  {
    3510  }
    3511 
    3512  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3513  m_Allocator(src.m_Allocator),
    3514  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3515  m_Count(src.m_Count),
    3516  m_Capacity(src.m_Count)
    3517  {
    3518  if(m_Count != 0)
    3519  {
    3520  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3521  }
    3522  }
    3523 
    3524  ~VmaVector()
    3525  {
    3526  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3527  }
    3528 
    3529  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3530  {
    3531  if(&rhs != this)
    3532  {
    3533  resize(rhs.m_Count);
    3534  if(m_Count != 0)
    3535  {
    3536  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3537  }
    3538  }
    3539  return *this;
    3540  }
    3541 
    3542  bool empty() const { return m_Count == 0; }
    3543  size_t size() const { return m_Count; }
    3544  T* data() { return m_pArray; }
    3545  const T* data() const { return m_pArray; }
    3546 
    3547  T& operator[](size_t index)
    3548  {
    3549  VMA_HEAVY_ASSERT(index < m_Count);
    3550  return m_pArray[index];
    3551  }
    3552  const T& operator[](size_t index) const
    3553  {
    3554  VMA_HEAVY_ASSERT(index < m_Count);
    3555  return m_pArray[index];
    3556  }
    3557 
    3558  T& front()
    3559  {
    3560  VMA_HEAVY_ASSERT(m_Count > 0);
    3561  return m_pArray[0];
    3562  }
    3563  const T& front() const
    3564  {
    3565  VMA_HEAVY_ASSERT(m_Count > 0);
    3566  return m_pArray[0];
    3567  }
    3568  T& back()
    3569  {
    3570  VMA_HEAVY_ASSERT(m_Count > 0);
    3571  return m_pArray[m_Count - 1];
    3572  }
    3573  const T& back() const
    3574  {
    3575  VMA_HEAVY_ASSERT(m_Count > 0);
    3576  return m_pArray[m_Count - 1];
    3577  }
    3578 
    3579  void reserve(size_t newCapacity, bool freeMemory = false)
    3580  {
    3581  newCapacity = VMA_MAX(newCapacity, m_Count);
    3582 
    3583  if((newCapacity < m_Capacity) && !freeMemory)
    3584  {
    3585  newCapacity = m_Capacity;
    3586  }
    3587 
    3588  if(newCapacity != m_Capacity)
    3589  {
    3590  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3591  if(m_Count != 0)
    3592  {
    3593  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3594  }
    3595  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3596  m_Capacity = newCapacity;
    3597  m_pArray = newArray;
    3598  }
    3599  }
    3600 
    3601  void resize(size_t newCount, bool freeMemory = false)
    3602  {
    3603  size_t newCapacity = m_Capacity;
    3604  if(newCount > m_Capacity)
    3605  {
    3606  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3607  }
    3608  else if(freeMemory)
    3609  {
    3610  newCapacity = newCount;
    3611  }
    3612 
    3613  if(newCapacity != m_Capacity)
    3614  {
    3615  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3616  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3617  if(elementsToCopy != 0)
    3618  {
    3619  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3620  }
    3621  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3622  m_Capacity = newCapacity;
    3623  m_pArray = newArray;
    3624  }
    3625 
    3626  m_Count = newCount;
    3627  }
    3628 
    3629  void clear(bool freeMemory = false)
    3630  {
    3631  resize(0, freeMemory);
    3632  }
    3633 
    3634  void insert(size_t index, const T& src)
    3635  {
    3636  VMA_HEAVY_ASSERT(index <= m_Count);
    3637  const size_t oldCount = size();
    3638  resize(oldCount + 1);
    3639  if(index < oldCount)
    3640  {
    3641  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3642  }
    3643  m_pArray[index] = src;
    3644  }
    3645 
    3646  void remove(size_t index)
    3647  {
    3648  VMA_HEAVY_ASSERT(index < m_Count);
    3649  const size_t oldCount = size();
    3650  if(index < oldCount - 1)
    3651  {
    3652  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3653  }
    3654  resize(oldCount - 1);
    3655  }
    3656 
    3657  void push_back(const T& src)
    3658  {
    3659  const size_t newIndex = size();
    3660  resize(newIndex + 1);
    3661  m_pArray[newIndex] = src;
    3662  }
    3663 
    3664  void pop_back()
    3665  {
    3666  VMA_HEAVY_ASSERT(m_Count > 0);
    3667  resize(size() - 1);
    3668  }
    3669 
    3670  void push_front(const T& src)
    3671  {
    3672  insert(0, src);
    3673  }
    3674 
    3675  void pop_front()
    3676  {
    3677  VMA_HEAVY_ASSERT(m_Count > 0);
    3678  remove(0);
    3679  }
    3680 
    3681  typedef T* iterator;
    3682 
    3683  iterator begin() { return m_pArray; }
    3684  iterator end() { return m_pArray + m_Count; }
    3685 
    3686 private:
    3687  AllocatorT m_Allocator;
    3688  T* m_pArray;
    3689  size_t m_Count;
    3690  size_t m_Capacity;
    3691 };
    3692 
// Inserts `item` into the VmaVector at position `index`.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3698 
// Removes the element at position `index` from the VmaVector.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3704 
    3705 #endif // #if VMA_USE_STL_VECTOR
    3706 
// Inserts `value` into a vector kept sorted according to CmpLess, preserving
// the ordering, and returns the index at which it was inserted.
template<typename CmpLess, typename VectorT>
size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        vector.data(),
        vector.data() + vector.size(),
        value,
        CmpLess()) - vector.data();
    VmaVectorInsert(vector, indexToInsert, value);
    return indexToInsert;
}
    3718 
// Removes the first element equivalent to `value` (neither compares less than
// the other under CmpLess) from a sorted vector.
// Returns true if such an element was found and removed.
template<typename CmpLess, typename VectorT>
bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
{
    CmpLess comparator;
    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
        vector.begin(),
        vector.end(),
        value,
        comparator);
    // Binary search only finds the lower bound; confirm it is an exact match.
    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    {
        size_t indexToRemove = it - vector.begin();
        VmaVectorRemove(vector, indexToRemove);
        return true;
    }
    return false;
}
    3736 
    3737 template<typename CmpLess, typename IterT, typename KeyT>
    3738 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3739 {
    3740  CmpLess comparator;
    3741  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3742  beg, end, value, comparator);
    3743  if(it == end ||
    3744  (!comparator(*it, value) && !comparator(value, *it)))
    3745  {
    3746  return it;
    3747  }
    3748  return end;
    3749 }
    3750 
    3752 // class VmaPoolAllocator
    3753 
    3754 /*
    3755 Allocator for objects of type T using a list of arrays (pools) to speed up
    3756 allocation. Number of elements that can be allocated is not bounded because
    3757 allocator can create multiple blocks.
    3758 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; any items still held by callers become invalid.
    void Clear();
    // Returns storage for one T. No constructor is run (union storage).
    T* Alloc();
    // Returns an item previously obtained from Alloc() to the pool.
    void Free(T* ptr);

private:
    // A slot is either a live T or, while free, a link in the block's
    // intrusive free list.
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // Fixed-size array of items plus the head index of its free list
    // (UINT32_MAX means the block is full).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    // Appends a fresh, fully-free block and returns a reference to it.
    ItemBlock& CreateNewBlock();
};
    3789 
// itemsPerBlock: capacity of each internally allocated block; must be > 0.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3798 
// Releases all blocks. Item destructors are NOT run (items are union slots).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3804 
    3805 template<typename T>
    3806 void VmaPoolAllocator<T>::Clear()
    3807 {
    3808  for(size_t i = m_ItemBlocks.size(); i--; )
    3809  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3810  m_ItemBlocks.clear();
    3811 }
    3812 
    3813 template<typename T>
    3814 T* VmaPoolAllocator<T>::Alloc()
    3815 {
    3816  for(size_t i = m_ItemBlocks.size(); i--; )
    3817  {
    3818  ItemBlock& block = m_ItemBlocks[i];
    3819  // This block has some free items: Use first one.
    3820  if(block.FirstFreeIndex != UINT32_MAX)
    3821  {
    3822  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3823  block.FirstFreeIndex = pItem->NextFreeIndex;
    3824  return &pItem->Value;
    3825  }
    3826  }
    3827 
    3828  // No block has free item: Create new one and use it.
    3829  ItemBlock& newBlock = CreateNewBlock();
    3830  Item* const pItem = &newBlock.pItems[0];
    3831  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3832  return &pItem->Value;
    3833 }
    3834 
    3835 template<typename T>
    3836 void VmaPoolAllocator<T>::Free(T* ptr)
    3837 {
    3838  // Search all memory blocks to find ptr.
    3839  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3840  {
    3841  ItemBlock& block = m_ItemBlocks[i];
    3842 
    3843  // Casting to union.
    3844  Item* pItemPtr;
    3845  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3846 
    3847  // Check if pItemPtr is in address range of this block.
    3848  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3849  {
    3850  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3851  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3852  block.FirstFreeIndex = index;
    3853  return;
    3854  }
    3855  }
    3856  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3857 }
    3858 
    3859 template<typename T>
    3860 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3861 {
    3862  ItemBlock newBlock = {
    3863  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3864 
    3865  m_ItemBlocks.push_back(newBlock);
    3866 
    3867  // Setup singly-linked list of all free items in this block.
    3868  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3869  newBlock.pItems[i].NextFreeIndex = i + 1;
    3870  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3871  return m_ItemBlocks.back();
    3872 }
    3873 
    3875 // class VmaRawList, VmaList
    3876 
    3877 #if VMA_USE_STL_LIST
    3878 
    3879 #define VmaList std::list
    3880 
    3881 #else // #if VMA_USE_STL_LIST
    3882 
// Node of VmaRawList: doubly-linked, pool-allocated.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
    3890 
// Doubly linked list.
// Low-level list exposing raw VmaListItem nodes. Nodes come from an internal
// VmaPoolAllocator; Value destructors are not run on removal or Clear().
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push/insert overloads without a value leave Value uninitialized for
    // the caller to fill in.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    3935 
// Creates an empty list; nodes will be pool-allocated 128 per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3945 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The pool allocator's own destructor releases all node storage at once.
}
    3952 
    3953 template<typename T>
    3954 void VmaRawList<T>::Clear()
    3955 {
    3956  if(IsEmpty() == false)
    3957  {
    3958  ItemType* pItem = m_pBack;
    3959  while(pItem != VMA_NULL)
    3960  {
    3961  ItemType* const pPrevItem = pItem->pPrev;
    3962  m_ItemAllocator.Free(pItem);
    3963  pItem = pPrevItem;
    3964  }
    3965  m_pFront = VMA_NULL;
    3966  m_pBack = VMA_NULL;
    3967  m_Count = 0;
    3968  }
    3969 }
    3970 
    3971 template<typename T>
    3972 VmaListItem<T>* VmaRawList<T>::PushBack()
    3973 {
    3974  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3975  pNewItem->pNext = VMA_NULL;
    3976  if(IsEmpty())
    3977  {
    3978  pNewItem->pPrev = VMA_NULL;
    3979  m_pFront = pNewItem;
    3980  m_pBack = pNewItem;
    3981  m_Count = 1;
    3982  }
    3983  else
    3984  {
    3985  pNewItem->pPrev = m_pBack;
    3986  m_pBack->pNext = pNewItem;
    3987  m_pBack = pNewItem;
    3988  ++m_Count;
    3989  }
    3990  return pNewItem;
    3991 }
    3992 
    3993 template<typename T>
    3994 VmaListItem<T>* VmaRawList<T>::PushFront()
    3995 {
    3996  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3997  pNewItem->pPrev = VMA_NULL;
    3998  if(IsEmpty())
    3999  {
    4000  pNewItem->pNext = VMA_NULL;
    4001  m_pFront = pNewItem;
    4002  m_pBack = pNewItem;
    4003  m_Count = 1;
    4004  }
    4005  else
    4006  {
    4007  pNewItem->pNext = m_pFront;
    4008  m_pFront->pPrev = pNewItem;
    4009  m_pFront = pNewItem;
    4010  ++m_Count;
    4011  }
    4012  return pNewItem;
    4013 }
    4014 
// Appends a new node at the back and copies `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
{
    ItemType* const pNewItem = PushBack();
    pNewItem->Value = value;
    return pNewItem;
}
    4022 
// Prepends a new node at the front and copies `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
{
    ItemType* const pNewItem = PushFront();
    pNewItem->Value = value;
    return pNewItem;
}
    4030 
    4031 template<typename T>
    4032 void VmaRawList<T>::PopBack()
    4033 {
    4034  VMA_HEAVY_ASSERT(m_Count > 0);
    4035  ItemType* const pBackItem = m_pBack;
    4036  ItemType* const pPrevItem = pBackItem->pPrev;
    4037  if(pPrevItem != VMA_NULL)
    4038  {
    4039  pPrevItem->pNext = VMA_NULL;
    4040  }
    4041  m_pBack = pPrevItem;
    4042  m_ItemAllocator.Free(pBackItem);
    4043  --m_Count;
    4044 }
    4045 
    4046 template<typename T>
    4047 void VmaRawList<T>::PopFront()
    4048 {
    4049  VMA_HEAVY_ASSERT(m_Count > 0);
    4050  ItemType* const pFrontItem = m_pFront;
    4051  ItemType* const pNextItem = pFrontItem->pNext;
    4052  if(pNextItem != VMA_NULL)
    4053  {
    4054  pNextItem->pPrev = VMA_NULL;
    4055  }
    4056  m_pFront = pNextItem;
    4057  m_ItemAllocator.Free(pFrontItem);
    4058  --m_Count;
    4059 }
    4060 
// Unlinks pItem from the list and returns its node to the pool allocator.
// pItem must be a member of this list.
template<typename T>
void VmaRawList<T>::Remove(ItemType* pItem)
{
    VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    VMA_HEAVY_ASSERT(m_Count > 0);

    // Fix the forward link of the predecessor, or the list head.
    if(pItem->pPrev != VMA_NULL)
    {
        pItem->pPrev->pNext = pItem->pNext;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pFront == pItem);
        m_pFront = pItem->pNext;
    }

    // Fix the backward link of the successor, or the list tail.
    if(pItem->pNext != VMA_NULL)
    {
        pItem->pNext->pPrev = pItem->pPrev;
    }
    else
    {
        VMA_HEAVY_ASSERT(m_pBack == pItem);
        m_pBack = pItem->pPrev;
    }

    m_ItemAllocator.Free(pItem);
    --m_Count;
}
    4090 
    4091 template<typename T>
    4092 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4093 {
    4094  if(pItem != VMA_NULL)
    4095  {
    4096  ItemType* const prevItem = pItem->pPrev;
    4097  ItemType* const newItem = m_ItemAllocator.Alloc();
    4098  newItem->pPrev = prevItem;
    4099  newItem->pNext = pItem;
    4100  pItem->pPrev = newItem;
    4101  if(prevItem != VMA_NULL)
    4102  {
    4103  prevItem->pNext = newItem;
    4104  }
    4105  else
    4106  {
    4107  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4108  m_pFront = newItem;
    4109  }
    4110  ++m_Count;
    4111  return newItem;
    4112  }
    4113  else
    4114  return PushBack();
    4115 }
    4116 
    4117 template<typename T>
    4118 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4119 {
    4120  if(pItem != VMA_NULL)
    4121  {
    4122  ItemType* const nextItem = pItem->pNext;
    4123  ItemType* const newItem = m_ItemAllocator.Alloc();
    4124  newItem->pNext = nextItem;
    4125  newItem->pPrev = pItem;
    4126  pItem->pNext = newItem;
    4127  if(nextItem != VMA_NULL)
    4128  {
    4129  nextItem->pPrev = newItem;
    4130  }
    4131  else
    4132  {
    4133  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4134  m_pBack = newItem;
    4135  }
    4136  ++m_Count;
    4137  return newItem;
    4138  }
    4139  else
    4140  return PushFront();
    4141 }
    4142 
// Inserts a new node before pItem (or at the end when pItem is null) and
// copies `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertBefore(pItem);
    newItem->Value = value;
    return newItem;
}
    4150 
// Inserts a new node after pItem (or at the beginning when pItem is null)
// and copies `value` into it.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
{
    ItemType* const newItem = InsertAfter(pItem);
    newItem->Value = value;
    return newItem;
}
    4158 
// std::list-like wrapper over VmaRawList that adds (const_)iterator support.
// AllocatorT is only used for its m_pCallbacks, like VmaStlAllocator.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Bidirectional iterator over the underlying raw list.
    // A null m_pItem represents end().
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end() yields the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only counterpart of iterator; implicitly constructible from it.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend() yields the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before `it` and returns an iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4343 
    4344 #endif // #if VMA_USE_STL_LIST
    4345 
    4347 // class VmaMap
    4348 
    4349 // Unused in this version.
    4350 #if 0
    4351 
    4352 #if VMA_USE_STL_UNORDERED_MAP
    4353 
    4354 #define VmaPair std::pair
    4355 
    4356 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4357  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4358 
    4359 #else // #if VMA_USE_STL_UNORDERED_MAP
    4360 
// Minimal substitute for std::pair, used when VMA_USE_STL_UNORDERED_MAP is 0.
// (Whole section currently disabled by the surrounding #if 0.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4370 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.

Despite mimicking unordered_map's interface, it is implemented as a VmaVector
of pairs kept sorted by key (see insert() below), so find() uses binary search.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying vector; insert()/erase()
    // may invalidate them.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    // Pairs sorted ascending by `first`, ordered by VmaPairFirstLess.
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4393 
    4394 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4395 
// Orders pairs by their `first` member. The heterogeneous overload allows
// binary search against a bare key without constructing a whole pair.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4408 
// Inserts the pair at the position that keeps m_Vector sorted by key
// (binary search for the first element whose key is not less than pair's).
// Duplicate keys are not rejected; the new pair lands before existing equals.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4419 
// Binary-searches the sorted vector for `key`.
// Returns an iterator (pointer) to the matching pair, or end() if absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    // Lower bound: first element whose key is not less than `key`.
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4437 
// Removes the element `it` points at; `it` must be a valid, dereferenceable
// iterator obtained from find()/begin(). Later iterators are invalidated.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4443 
    4444 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4445 
    4446 #endif // #if 0
    4447 
    4449 
class VmaDeviceMemoryBlock;

// Direction of a cache-control operation on mapped memory:
// flush host writes to the device, or invalidate to observe device writes.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4453 
/*
Represents a single memory allocation. It is either:
- ALLOCATION_TYPE_BLOCK - a suballocation inside a VmaDeviceMemoryBlock
  (a "lost" allocation is a block allocation with m_Block == VMA_NULL - see
  InitLost()), or
- ALLOCATION_TYPE_DEDICATED - owns its own VkDeviceMemory object.
State specific to each kind lives in the anonymous union below.
An object starts as ALLOCATION_TYPE_NONE and is initialized exactly once via
one of the Init*() methods.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // High bit of m_MapCount: set when the allocation is persistently mapped
    // (created with the MAPPED flag). Lower 7 bits count vmaMapMemory() calls.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // m_pUserData points to an owned copy of a string
        // (freed via FreeUserDataString()) rather than an opaque pointer.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,      // Constructed but not yet initialized.
        ALLOCATION_TYPE_BLOCK,     // Suballocation of a VmaDeviceMemoryBlock (or lost).
        ALLOCATION_TYPE_DEDICATED, // Has its own VkDeviceMemory.
    };

    // userDataString: when true, user data is treated as an owned string copy
    // (sets FLAG_USER_DATA_STRING).
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Turns an uninitialized (ALLOCATION_TYPE_NONE) object into a block
    // suballocation at the given offset within `block`.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes the object as an allocation that is born lost:
    // block-type, but with no actual block behind it.
    // Requires m_LastUseFrameIndex to already be VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Re-points an existing block allocation at a different block/offset
    // (used during defragmentation - TODO confirm).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        // Alignment is irrelevant for a dedicated VkDeviceMemory object.
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block allocations.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; used in the MakeLost protocol.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills `outInfo` as if this dedicated allocation were its own "block":
    // one block, one allocation, zero unused bytes.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges: min stays at sentinel UINT64_MAX, max at 0.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap entry points, split by allocation type.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records VkBufferUsageFlags/VkImageUsageFlags for stats; settable once.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member selected by m_Type.
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4672 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    // Null for a free range; otherwise the allocation occupying this region.
    VmaAllocation hAllocation;
    VmaSuballocationType type;
};
    4684 
// Comparator for offsets.
// Ascending order - used for sorting/searching suballocations by offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Descending-offset counterpart of VmaSuballocationOffsetLess.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    4700 
// Doubly-linked list of suballocations, as used by VmaBlockMetadata_Generic.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4705 
    4706 /*
    4707 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4708 
    4709 If canMakeOtherLost was false:
    4710 - item points to a FREE suballocation.
    4711 - itemsToMakeLostCount is 0.
    4712 
    4713 If canMakeOtherLost was true:
    4714 - item points to first of sequence of suballocations, which are either FREE,
    4715  or point to VmaAllocations that can become lost.
    4716 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4717  the requested allocation to succeed.
    4718 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    // Opaque, implementation-specific data set by the metadata class that
    // produced this request - TODO confirm per-implementation contents.
    void* customData;

    // Heuristic cost of fulfilling this request: bytes of allocations that
    // must be made lost, plus a fixed per-allocation penalty
    // (VMA_LOST_ALLOCATION_COST). Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4733 
    4734 /*
    4735 Data structure used for bookkeeping of allocations and unused ranges of memory
    4736 in a single VkDeviceMemory block.
    4737 */
// Abstract base for per-block bookkeeping. Concrete strategies below:
// VmaBlockMetadata_Generic, _Linear, _Buddy.
class VmaBlockMetadata
{
public:
    // hAllocator is presumably used to obtain allocation callbacks
    // (stored in m_pAllocationCallbacks) - ctor defined elsewhere.
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations a previously created request depends on.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Returns the number of allocations made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // pBlockData: pointer to mapped block memory, scanned for corruption markers.
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation does not support resizing and reports failure.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // JSON helpers shared by derived classes' PrintDetailedMap() implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4822 
// Used inside Validate() implementations: asserts on a failed condition and
// makes the calling function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4827 
// Default metadata strategy: all suballocations (used and free) in a linked
// list, plus a by-size index of larger free ranges to speed up the search
// for a suitable free region.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Every list element is either an allocation or a free range, so
    // allocation count = total elements - free elements.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    // Cached counters, kept in sync with m_Suballocations; cross-checked by Validate().
    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4920 
    4921 /*
    4922 Allocations and their references in internal data structure look like this:
    4923 
    4924 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4925 
    4926  0 +-------+
    4927  | |
    4928  | |
    4929  | |
    4930  +-------+
    4931  | Alloc | 1st[m_1stNullItemsBeginCount]
    4932  +-------+
    4933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4934  +-------+
    4935  | ... |
    4936  +-------+
    4937  | Alloc | 1st[1st.size() - 1]
    4938  +-------+
    4939  | |
    4940  | |
    4941  | |
    4942 GetSize() +-------+
    4943 
    4944 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4945 
    4946  0 +-------+
    4947  | Alloc | 2nd[0]
    4948  +-------+
    4949  | Alloc | 2nd[1]
    4950  +-------+
    4951  | ... |
    4952  +-------+
    4953  | Alloc | 2nd[2nd.size() - 1]
    4954  +-------+
    4955  | |
    4956  | |
    4957  | |
    4958  +-------+
    4959  | Alloc | 1st[m_1stNullItemsBeginCount]
    4960  +-------+
    4961  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4962  +-------+
    4963  | ... |
    4964  +-------+
    4965  | Alloc | 1st[1st.size() - 1]
    4966  +-------+
    4967  | |
    4968 GetSize() +-------+
    4969 
    4970 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4971 
    4972  0 +-------+
    4973  | |
    4974  | |
    4975  | |
    4976  +-------+
    4977  | Alloc | 1st[m_1stNullItemsBeginCount]
    4978  +-------+
    4979  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4980  +-------+
    4981  | ... |
    4982  +-------+
    4983  | Alloc | 1st[1st.size() - 1]
    4984  +-------+
    4985  | |
    4986  | |
    4987  | |
    4988  +-------+
    4989  | Alloc | 2nd[2nd.size() - 1]
    4990  +-------+
    4991  | ... |
    4992  +-------+
    4993  | Alloc | 2nd[1]
    4994  +-------+
    4995  | Alloc | 2nd[0]
    4996 GetSize() +-------+
    4997 
    4998 */
// Metadata for linear allocation: plain stack, double stack, or ring buffer,
// depending on m_2ndVectorMode. See the diagram in the comment above.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Accessors resolving the ping-pong indirection described above.
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5097 
    5098 /*
    5099 - GetSize() is the original size of allocated memory block.
    5100 - m_UsableSize is this size aligned down to a power of two.
    5101  All allocations and calculations happen relative to m_UsableSize.
    5102 - GetUnusableSize() is the difference between them.
 It is reported as separate, unused range, not available for allocations.
    5104 
    5105 Node at level 0 has size = m_UsableSize.
    5106 Each next level contains nodes with size 2 times smaller than current level.
    5107 m_LevelCount is the maximum number of levels to use in the current object.
    5108 */
/*
Block metadata that keeps free/used regions in a binary "buddy" tree.
The usable size is the block size aligned down to a power of two
(m_UsableSize); each level of the tree halves the node size (see
LevelToNodeSize), bounded by MAX_LEVELS levels and MIN_NODE_SIZE bytes.
Space lost to the power-of-two rounding is reported via GetUnusableSize().
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Reported free size includes the tail cut off by power-of-two rounding,
    // so that total size accounting stays consistent with other algorithms.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty exactly when the root node is a single free node (never split).
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    // Both Free overloads forward to the private FreeAtOffset(); the
    // allocation handle, when available, is passed along for validation only.
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled while walking the tree in ValidateNode(), compared
    // against the cached counters by Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree. The active union member depends on `type`:
    // FREE nodes are linked into the per-level free list, ALLOCATION nodes
    // store the owning VmaAllocation, SPLIT nodes point to their left child.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        // Sibling node at the same level (the other half of the parent's range).
        Node* buddy;

        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free;
            struct
            {
                VmaAllocation alloc;
            } allocation;
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level list of free nodes; only front/back pointers are stored here,
    // the links themselves live in Node::free.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves with each level down from the root.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5245 
    5246 /*
    5247 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5248 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5249 
    5250 Thread-safety: This class must be externally synchronized.
    5251 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Suballocation bookkeeping for this block. Public: owning code
    // (block vector / defragmentator) works with it directly.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Mapping is tracked in m_MapCount; `count` presumably adds/removes that
    // many map references — confirm against the implementation.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/verify magic values surrounding an allocation (corruption detection).
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5314 
    5315 struct VmaPointerLess
    5316 {
    5317  bool operator()(const void* lhs, const void* rhs) const
    5318  {
    5319  return lhs < rhs;
    5320  }
    5321 };
    5322 
    5323 class VmaDefragmentator;
    5324 
    5325 /*
    5326 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5327 Vulkan memory type.
    5328 
    5329 Synchronized internally with a mutex.
    5330 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Creates the minimum number of blocks (m_MinBlockCount) up front.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Returns the existing defragmentator or lazily creates one.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5443 
// A custom memory pool: a dedicated VmaBlockVector plus a numeric id.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id may be assigned only once, while still 0 (enforced by the assert).
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5466 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation. Candidate allocations are collected with AddAllocation(),
// then Defragment() performs moves within the given byte/count budgets.
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Running totals exposed via GetBytesMoved()/GetAllocationsMoved().
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, with an optional
    // caller-owned out-flag (m_pChanged).
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations by size, largest first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block working state used during defragmentation.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when not all of its allocations
        // were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // "Descecnding" [sic] - historical typo; renaming would break existing
        // call sites, so it is kept as-is.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Heterogeneous comparator: allows searching a BlockInfo* collection by
    // raw VmaDeviceMemoryBlock* as well as by BlockInfo*.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as movable. pChanged, if not null, is presumably
    // set when the allocation actually gets moved — see Defragment implementation.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5596 
    5597 #if VMA_RECORDING_ENABLED
    5598 
/*
Writes a trace of allocator calls to a file (m_File), one Record* method per
traced VMA entry point. File access is presumably guarded by m_FileMutex when
m_UseMutex is set — confirm against the implementation.
Compiled only when VMA_RECORDING_ENABLED.
*/
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device and memory configuration so the
    // trace can be interpreted later.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Thread id and timestamp attached to each recorded call.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Helper producing a printable representation of pUserData; m_PtrStr
    // (16 hex chars + terminator) suggests pointer-as-hex formatting when the
    // user data is not a string — confirm against the implementation.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    VMA_MUTEX m_FileMutex;
    // Performance-counter frequency and start value, used to compute call times.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5698 
    5699 #endif // #if VMA_RECORDING_ENABLED
    5700 
    5701 // Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    // Whether VK_KHR_dedicated_allocation functionality is enabled for this
    // allocator — set during Init (see implementation).
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    // Second-stage initialization that can fail; call right after construction.
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Effective granularity: the device limit, raised to the debug minimum
    // VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY if that is larger.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Low-level wrappers around vkAllocateMemory/vkFreeMemory — presumably
    // they also account for m_HeapSizeLimit and invoke m_DeviceMemoryCallbacks;
    // confirm against the implementation.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given byte pattern (debug feature).
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Unregisters and frees the given dedicated allocation.
    // (Previous comment claimed a bool return; the function returns void.)
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5902 
    5904 // Memory allocation #2 after VmaAllocator_T definition
    5905 
    5906 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5907 {
    5908  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5909 }
    5910 
    5911 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5912 {
    5913  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5914 }
    5915 
    5916 template<typename T>
    5917 static T* VmaAllocate(VmaAllocator hAllocator)
    5918 {
    5919  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5920 }
    5921 
    5922 template<typename T>
    5923 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5924 {
    5925  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5926 }
    5927 
    5928 template<typename T>
    5929 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5930 {
    5931  if(ptr != VMA_NULL)
    5932  {
    5933  ptr->~T();
    5934  VmaFree(hAllocator, ptr);
    5935  }
    5936 }
    5937 
    5938 template<typename T>
    5939 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5940 {
    5941  if(ptr != VMA_NULL)
    5942  {
    5943  for(size_t i = count; i--; )
    5944  ptr[i].~T();
    5945  VmaFree(hAllocator, ptr);
    5946  }
    5947 }
    5948 
    5950 // VmaStringBuilder
    5951 
    5952 #if VMA_STATS_STRING_ENABLED
    5953 
// Minimal append-only string builder over VmaVector<char>.
// The buffer is NOT null-terminated; use GetLength() together with GetData().
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5971 
    5972 void VmaStringBuilder::Add(const char* pStr)
    5973 {
    5974  const size_t strLen = strlen(pStr);
    5975  if(strLen > 0)
    5976  {
    5977  const size_t oldCount = m_Data.size();
    5978  m_Data.resize(oldCount + strLen);
    5979  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5980  }
    5981 }
    5982 
// Appends the decimal representation of a 32-bit unsigned number.
void VmaStringBuilder::AddNumber(uint32_t num)
{
    char buf[11]; // Up to 10 decimal digits for uint32_t + null terminator.
    VmaUint32ToStr(buf, sizeof(buf), num);
    Add(buf);
}
    5989 
// Appends the decimal representation of a 64-bit unsigned number.
void VmaStringBuilder::AddNumber(uint64_t num)
{
    char buf[21]; // Up to 20 decimal digits for uint64_t + null terminator.
    VmaUint64ToStr(buf, sizeof(buf), num);
    Add(buf);
}
    5996 
// Appends a textual representation of the pointer (formatted by VmaPtrToStr).
void VmaStringBuilder::AddPointer(const void* ptr)
{
    char buf[21];
    VmaPtrToStr(buf, sizeof(buf), ptr);
    Add(buf);
}
    6003 
    6004 #endif // #if VMA_STATS_STRING_ENABLED
    6005 
    6007 // VmaJsonWriter
    6008 
    6009 #if VMA_STATS_STRING_ENABLED
    6010 
// Stateful helper that emits JSON into a VmaStringBuilder.
// Begin*/End* calls must be balanced — the destructor asserts that no string
// is open and the collection stack is empty.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value in one call.
    void WriteString(const char* pStr);
    // Incremental string building: BeginString, any number of ContinueString
    // calls, then EndString.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object or array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of open collections; empty when the document is complete.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6059 
// Indentation string emitted once per open collection level in pretty-printed output.
const char* const VmaJsonWriter::INDENT = " ";
    6061 
// Binds the writer to an output string builder; pAllocationCallbacks are used
// only for the internal collection stack's allocations.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6068 
// Destructor only validates usage: every string and collection must have been closed.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6074 
    6075 void VmaJsonWriter::BeginObject(bool singleLine)
    6076 {
    6077  VMA_ASSERT(!m_InsideString);
    6078 
    6079  BeginValue(false);
    6080  m_SB.Add('{');
    6081 
    6082  StackItem item;
    6083  item.type = COLLECTION_TYPE_OBJECT;
    6084  item.valueCount = 0;
    6085  item.singleLineMode = singleLine;
    6086  m_Stack.push_back(item);
    6087 }
    6088 
// Closes the innermost collection, which must be an object opened with BeginObject().
void VmaJsonWriter::EndObject()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing brace sits at the parent's indentation level.
    WriteIndent(true);
    m_SB.Add('}');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    m_Stack.pop_back();
}
    6099 
    6100 void VmaJsonWriter::BeginArray(bool singleLine)
    6101 {
    6102  VMA_ASSERT(!m_InsideString);
    6103 
    6104  BeginValue(false);
    6105  m_SB.Add('[');
    6106 
    6107  StackItem item;
    6108  item.type = COLLECTION_TYPE_ARRAY;
    6109  item.valueCount = 0;
    6110  item.singleLineMode = singleLine;
    6111  m_Stack.push_back(item);
    6112 }
    6113 
// Closes the innermost collection, which must be an array opened with BeginArray().
void VmaJsonWriter::EndArray()
{
    VMA_ASSERT(!m_InsideString);

    // oneLess: the closing bracket sits at the parent's indentation level.
    WriteIndent(true);
    m_SB.Add(']');

    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    m_Stack.pop_back();
}
    6124 
// Writes a complete quoted, escaped string value (or object key) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6130 
    6131 void VmaJsonWriter::BeginString(const char* pStr)
    6132 {
    6133  VMA_ASSERT(!m_InsideString);
    6134 
    6135  BeginValue(true);
    6136  m_SB.Add('"');
    6137  m_InsideString = true;
    6138  if(pStr != VMA_NULL && pStr[0] != '\0')
    6139  {
    6140  ContinueString(pStr);
    6141  }
    6142 }
    6143 
    6144 void VmaJsonWriter::ContinueString(const char* pStr)
    6145 {
    6146  VMA_ASSERT(m_InsideString);
    6147 
    6148  const size_t strLen = strlen(pStr);
    6149  for(size_t i = 0; i < strLen; ++i)
    6150  {
    6151  char ch = pStr[i];
    6152  if(ch == '\\')
    6153  {
    6154  m_SB.Add("\\\\");
    6155  }
    6156  else if(ch == '"')
    6157  {
    6158  m_SB.Add("\\\"");
    6159  }
    6160  else if(ch >= 32)
    6161  {
    6162  m_SB.Add(ch);
    6163  }
    6164  else switch(ch)
    6165  {
    6166  case '\b':
    6167  m_SB.Add("\\b");
    6168  break;
    6169  case '\f':
    6170  m_SB.Add("\\f");
    6171  break;
    6172  case '\n':
    6173  m_SB.Add("\\n");
    6174  break;
    6175  case '\r':
    6176  m_SB.Add("\\r");
    6177  break;
    6178  case '\t':
    6179  m_SB.Add("\\t");
    6180  break;
    6181  default:
    6182  VMA_ASSERT(0 && "Character not currently supported.");
    6183  break;
    6184  }
    6185  }
    6186 }
    6187 
// Appends the decimal form of a 32-bit number to the currently open string.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6193 
// Appends the decimal form of a 64-bit number to the currently open string.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
    6199 
// Appends the textual form of a pointer to the currently open string.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6205 
    6206 void VmaJsonWriter::EndString(const char* pStr)
    6207 {
    6208  VMA_ASSERT(m_InsideString);
    6209  if(pStr != VMA_NULL && pStr[0] != '\0')
    6210  {
    6211  ContinueString(pStr);
    6212  }
    6213  m_SB.Add('"');
    6214  m_InsideString = false;
    6215 }
    6216 
// Writes a 32-bit number as a JSON value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6223 
// Writes a 64-bit number as a JSON value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6230 
    6231 void VmaJsonWriter::WriteBool(bool b)
    6232 {
    6233  VMA_ASSERT(!m_InsideString);
    6234  BeginValue(false);
    6235  m_SB.Add(b ? "true" : "false");
    6236 }
    6237 
// Writes the JSON "null" literal as a value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6244 
    6245 void VmaJsonWriter::BeginValue(bool isString)
    6246 {
    6247  if(!m_Stack.empty())
    6248  {
    6249  StackItem& currItem = m_Stack.back();
    6250  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6251  currItem.valueCount % 2 == 0)
    6252  {
    6253  VMA_ASSERT(isString);
    6254  }
    6255 
    6256  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6257  currItem.valueCount % 2 != 0)
    6258  {
    6259  m_SB.Add(": ");
    6260  }
    6261  else if(currItem.valueCount > 0)
    6262  {
    6263  m_SB.Add(", ");
    6264  WriteIndent();
    6265  }
    6266  else
    6267  {
    6268  WriteIndent();
    6269  }
    6270  ++currItem.valueCount;
    6271  }
    6272 }
    6273 
    6274 void VmaJsonWriter::WriteIndent(bool oneLess)
    6275 {
    6276  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6277  {
    6278  m_SB.AddNewLine();
    6279 
    6280  size_t count = m_Stack.size();
    6281  if(count > 0 && oneLess)
    6282  {
    6283  --count;
    6284  }
    6285  for(size_t i = 0; i < count; ++i)
    6286  {
    6287  m_SB.Add(INDENT);
    6288  }
    6289  }
    6290 }
    6291 
    6292 #endif // #if VMA_STATS_STRING_ENABLED
    6293 
    6295 
    6296 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6297 {
    6298  if(IsUserDataString())
    6299  {
    6300  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6301 
    6302  FreeUserDataString(hAllocator);
    6303 
    6304  if(pUserData != VMA_NULL)
    6305  {
    6306  const char* const newStrSrc = (char*)pUserData;
    6307  const size_t newStrLen = strlen(newStrSrc);
    6308  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6309  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6310  m_pUserData = newStrDst;
    6311  }
    6312  }
    6313  else
    6314  {
    6315  m_pUserData = pUserData;
    6316  }
    6317 }
    6318 
// Rebinds this block allocation to a different block/offset (used by
// defragmentation). If the allocation is currently mapped, the mapping
// reference count is transferred from the old block to the new one so the
// blocks' own map counters stay balanced.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistent map counts as one extra mapping reference.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6340 
// Updates the cached size of this allocation (used when an allocation is
// resized in place). newSize must be non-zero.
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    6346 
    6347 VkDeviceSize VmaAllocation_T::GetOffset() const
    6348 {
    6349  switch(m_Type)
    6350  {
    6351  case ALLOCATION_TYPE_BLOCK:
    6352  return m_BlockAllocation.m_Offset;
    6353  case ALLOCATION_TYPE_DEDICATED:
    6354  return 0;
    6355  default:
    6356  VMA_ASSERT(0);
    6357  return 0;
    6358  }
    6359 }
    6360 
    6361 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6362 {
    6363  switch(m_Type)
    6364  {
    6365  case ALLOCATION_TYPE_BLOCK:
    6366  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6367  case ALLOCATION_TYPE_DEDICATED:
    6368  return m_DedicatedAllocation.m_hMemory;
    6369  default:
    6370  VMA_ASSERT(0);
    6371  return VK_NULL_HANDLE;
    6372  }
    6373 }
    6374 
    6375 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6376 {
    6377  switch(m_Type)
    6378  {
    6379  case ALLOCATION_TYPE_BLOCK:
    6380  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6381  case ALLOCATION_TYPE_DEDICATED:
    6382  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6383  default:
    6384  VMA_ASSERT(0);
    6385  return UINT32_MAX;
    6386  }
    6387 }
    6388 
    6389 void* VmaAllocation_T::GetMappedData() const
    6390 {
    6391  switch(m_Type)
    6392  {
    6393  case ALLOCATION_TYPE_BLOCK:
    6394  if(m_MapCount != 0)
    6395  {
    6396  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6397  VMA_ASSERT(pBlockData != VMA_NULL);
    6398  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6399  }
    6400  else
    6401  {
    6402  return VMA_NULL;
    6403  }
    6404  break;
    6405  case ALLOCATION_TYPE_DEDICATED:
    6406  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6407  return m_DedicatedAllocation.m_pMappedData;
    6408  default:
    6409  VMA_ASSERT(0);
    6410  return VMA_NULL;
    6411  }
    6412 }
    6413 
    6414 bool VmaAllocation_T::CanBecomeLost() const
    6415 {
    6416  switch(m_Type)
    6417  {
    6418  case ALLOCATION_TYPE_BLOCK:
    6419  return m_BlockAllocation.m_CanBecomeLost;
    6420  case ALLOCATION_TYPE_DEDICATED:
    6421  return false;
    6422  default:
    6423  VMA_ASSERT(0);
    6424  return false;
    6425  }
    6426 }
    6427 
// Returns the custom pool this block allocation belongs to (VK_NULL_HANDLE
// for default pools). Only valid for block allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6433 
// Tries to atomically mark this allocation as lost. Returns true on success;
// returns false if the allocation is already lost or was used too recently
// (within frameInUseCount frames of currentFrameIndex).
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Lock-free compare-exchange loop on the last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Caller should not attempt to lose an already-lost allocation.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU - cannot be lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: another thread updated the index; localLastUseFrameIndex
            // now holds the fresh value - retry the checks.
        }
    }
}
    6465 
    6466 #if VMA_STATS_STRING_ENABLED
    6467 
    6468 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Used to print human-readable suballocation types in the JSON stats dump;
// the array must stay in sync with that enum's order.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6477 
// Writes this allocation's parameters as key/value pairs into an already-open
// JSON object (the caller owns BeginObject/EndObject).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned string: print its text.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer: print its address as the value.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Usage flags are recorded only for buffer/image allocations.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6513 
    6514 #endif
    6515 
    6516 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6517 {
    6518  VMA_ASSERT(IsUserDataString());
    6519  if(m_pUserData != VMA_NULL)
    6520  {
    6521  char* const oldStr = (char*)m_pUserData;
    6522  const size_t oldStrLen = strlen(oldStr);
    6523  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6524  m_pUserData = VMA_NULL;
    6525  }
    6526 }
    6527 
    6528 void VmaAllocation_T::BlockAllocMap()
    6529 {
    6530  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6531 
    6532  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6533  {
    6534  ++m_MapCount;
    6535  }
    6536  else
    6537  {
    6538  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6539  }
    6540 }
    6541 
    6542 void VmaAllocation_T::BlockAllocUnmap()
    6543 {
    6544  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6545 
    6546  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6547  {
    6548  --m_MapCount;
    6549  }
    6550  else
    6551  {
    6552  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6553  }
    6554 }
    6555 
// Maps a dedicated allocation's memory and returns the pointer in *ppData.
// Reference-counted: the first call performs vkMapMemory over the whole
// allocation; subsequent calls just bump the count and reuse the pointer.
// Returns VK_ERROR_MEMORY_MAP_FAILED if the count would overflow 0x7F.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: reuse the existing pointer if the 7-bit counter has room.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping: map the entire memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    6592 
// Unmaps a dedicated allocation. Reference-counted counterpart of
// DedicatedAllocMap: vkUnmapMemory is called only when the count drops to zero.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference released: actually unmap the Vulkan memory.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6613 
    6614 #if VMA_STATS_STRING_ENABLED
    6615 
// Serializes a VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one allocation / unused range,
// since with a single entry they would all equal the single value.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6663 
    6664 #endif // #if VMA_STATS_STRING_ENABLED
    6665 
// Comparator ordering suballocation-list iterators by the size of the
// suballocation they point to. The second overload enables binary search
// (e.g. VmaBinaryFindFirstNotLess) against a plain size value.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6681 
    6682 
    6684 // class VmaBlockMetadata
    6685 
// Base metadata constructor: size is set later via Init(); only the
// allocation callbacks are captured here.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6691 
    6692 #if VMA_STATS_STRING_ENABLED
    6693 
// Opens the JSON object describing one memory block and begins the
// "Suballocations" array. Must be paired with PrintDetailedMap_End().
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    // Left open: callers append suballocation entries, then call PrintDetailedMap_End.
    json.WriteString("Suballocations");
    json.BeginArray();
}
    6716 
// Appends one occupied suballocation entry (single-line JSON object) to the
// array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Type, size, user data etc. come from the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6730 
// Appends one free-range entry (single-line JSON object, type "FREE") to the
// array opened by PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6748 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6754 
    6755 #endif // #if VMA_STATS_STRING_ENABLED
    6756 
    6758 // class VmaBlockMetadata_Generic
    6759 
// Generic (free-list based) block metadata. Real initialization happens in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6768 
// Members clean up after themselves; nothing to release explicitly.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6772 
// Initializes metadata for an empty block of the given size: the whole block
// becomes a single free suballocation, registered in the by-size list.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Blocks are always large enough that their initial free range qualifies
    // for registration in m_FreeSuballocationsBySize.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6792 
// Consistency check of all metadata invariants. Returns true when valid;
// each VMA_VALIDATE reports and fails on the first violated invariant.
// Used by VMA_HEAVY_ASSERT in debug/heavy-validation builds.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free ranges carry no allocation handle; occupied ones must.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            // Only ranges at or above the threshold are tracked by size.
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with this range's placement.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6874 
    6875 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6876 {
    6877  if(!m_FreeSuballocationsBySize.empty())
    6878  {
    6879  return m_FreeSuballocationsBySize.back()->size;
    6880  }
    6881  else
    6882  {
    6883  return 0;
    6884  }
    6885 }
    6886 
// An empty block consists of exactly one suballocation, and that one is free.
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
    6891 
// Fills outInfo with statistics for this single block (counts, byte totals,
// and min/max sizes of allocations and free ranges). Avg fields are computed
// later by the caller when aggregating; min stays UINT64_MAX / max stays 0
// if no matching range exists.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
    6925 
// Accumulates this block's statistics into running pool-wide totals.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    // Pool-wide maximum, not a sum.
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
    6936 
    6937 #if VMA_STATS_STRING_ENABLED
    6938 
    6939 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6940 {
    6941  PrintDetailedMap_Begin(json,
    6942  m_SumFreeSize, // unusedBytes
    6943  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6944  m_FreeCount); // unusedRangeCount
    6945 
    6946  size_t i = 0;
    6947  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6948  suballocItem != m_Suballocations.cend();
    6949  ++suballocItem, ++i)
    6950  {
    6951  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6952  {
    6953  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6954  }
    6955  else
    6956  {
    6957  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6958  }
    6959  }
    6960 
    6961  PrintDetailedMap_End(json);
    6962 }
    6963 
    6964 #endif // #if VMA_STATS_STRING_ENABLED
    6965 
    6966 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6967  uint32_t currentFrameIndex,
    6968  uint32_t frameInUseCount,
    6969  VkDeviceSize bufferImageGranularity,
    6970  VkDeviceSize allocSize,
    6971  VkDeviceSize allocAlignment,
    6972  bool upperAddress,
    6973  VmaSuballocationType allocType,
    6974  bool canMakeOtherLost,
    6975  uint32_t strategy,
    6976  VmaAllocationRequest* pAllocationRequest)
    6977 {
    6978  VMA_ASSERT(allocSize > 0);
    6979  VMA_ASSERT(!upperAddress);
    6980  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6981  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6982  VMA_HEAVY_ASSERT(Validate());
    6983 
    6984  // There is not enough total free space in this block to fullfill the request: Early return.
    6985  if(canMakeOtherLost == false &&
    6986  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6987  {
    6988  return false;
    6989  }
    6990 
    6991  // New algorithm, efficiently searching freeSuballocationsBySize.
    6992  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6993  if(freeSuballocCount > 0)
    6994  {
    6996  {
    6997  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    6998  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    6999  m_FreeSuballocationsBySize.data(),
    7000  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7001  allocSize + 2 * VMA_DEBUG_MARGIN,
    7002  VmaSuballocationItemSizeLess());
    7003  size_t index = it - m_FreeSuballocationsBySize.data();
    7004  for(; index < freeSuballocCount; ++index)
    7005  {
    7006  if(CheckAllocation(
    7007  currentFrameIndex,
    7008  frameInUseCount,
    7009  bufferImageGranularity,
    7010  allocSize,
    7011  allocAlignment,
    7012  allocType,
    7013  m_FreeSuballocationsBySize[index],
    7014  false, // canMakeOtherLost
    7015  &pAllocationRequest->offset,
    7016  &pAllocationRequest->itemsToMakeLostCount,
    7017  &pAllocationRequest->sumFreeSize,
    7018  &pAllocationRequest->sumItemSize))
    7019  {
    7020  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7021  return true;
    7022  }
    7023  }
    7024  }
    7025  else // WORST_FIT, FIRST_FIT
    7026  {
    7027  // Search staring from biggest suballocations.
    7028  for(size_t index = freeSuballocCount; index--; )
    7029  {
    7030  if(CheckAllocation(
    7031  currentFrameIndex,
    7032  frameInUseCount,
    7033  bufferImageGranularity,
    7034  allocSize,
    7035  allocAlignment,
    7036  allocType,
    7037  m_FreeSuballocationsBySize[index],
    7038  false, // canMakeOtherLost
    7039  &pAllocationRequest->offset,
    7040  &pAllocationRequest->itemsToMakeLostCount,
    7041  &pAllocationRequest->sumFreeSize,
    7042  &pAllocationRequest->sumItemSize))
    7043  {
    7044  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7045  return true;
    7046  }
    7047  }
    7048  }
    7049  }
    7050 
    7051  if(canMakeOtherLost)
    7052  {
    7053  // Brute-force algorithm. TODO: Come up with something better.
    7054 
    7055  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7056  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7057 
    7058  VmaAllocationRequest tmpAllocRequest = {};
    7059  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7060  suballocIt != m_Suballocations.end();
    7061  ++suballocIt)
    7062  {
    7063  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7064  suballocIt->hAllocation->CanBecomeLost())
    7065  {
    7066  if(CheckAllocation(
    7067  currentFrameIndex,
    7068  frameInUseCount,
    7069  bufferImageGranularity,
    7070  allocSize,
    7071  allocAlignment,
    7072  allocType,
    7073  suballocIt,
    7074  canMakeOtherLost,
    7075  &tmpAllocRequest.offset,
    7076  &tmpAllocRequest.itemsToMakeLostCount,
    7077  &tmpAllocRequest.sumFreeSize,
    7078  &tmpAllocRequest.sumItemSize))
    7079  {
    7080  tmpAllocRequest.item = suballocIt;
    7081 
    7082  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7084  {
    7085  *pAllocationRequest = tmpAllocRequest;
    7086  }
    7087  }
    7088  }
    7089  }
    7090 
    7091  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7092  {
    7093  return true;
    7094  }
    7095  }
    7096 
    7097  return false;
    7098 }
    7099 
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Executes the "make lost" plan prepared earlier in pAllocationRequest:
    // starting at pAllocationRequest->item, makes lost (and frees) as many
    // used suballocations as itemsToMakeLostCount demands.
    // Returns false if any of them cannot be made lost (MakeLost fails,
    // e.g. still used within the last frameInUseCount frames); on success,
    // pAllocationRequest->item points at a free suballocation.
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Step over a free item to reach the next allocation to make lost.
        // NOTE(review): a single `if` (not a loop) assumes two free items never
        // appear adjacent here — FreeSuballocation below merges free neighbors,
        // which appears to maintain that invariant; confirm.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge the item with free neighbors and
            // returns the resulting free item, which becomes the new cursor.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7131 
    7132 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7133 {
    7134  uint32_t lostAllocationCount = 0;
    7135  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7136  it != m_Suballocations.end();
    7137  ++it)
    7138  {
    7139  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    7140  it->hAllocation->CanBecomeLost() &&
    7141  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    7142  {
    7143  it = FreeSuballocation(it);
    7144  ++lostAllocationCount;
    7145  }
    7146  }
    7147  return lostAllocationCount;
    7148 }
    7149 
    7150 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7151 {
    7152  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7153  it != m_Suballocations.end();
    7154  ++it)
    7155  {
    7156  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7157  {
    7158  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7159  {
    7160  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7161  return VK_ERROR_VALIDATION_FAILED_EXT;
    7162  }
    7163  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7164  {
    7165  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7166  return VK_ERROR_VALIDATION_FAILED_EXT;
    7167  }
    7168  }
    7169  }
    7170 
    7171  return VK_SUCCESS;
    7172 }
    7173 
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Commits a previously validated allocation request: converts the free
    // suballocation request.item into a used one of allocSize bytes at
    // request.offset, splitting off any leftover space before/after it into
    // new free suballocations, and updates m_FreeCount / m_SumFreeSize.
    VMA_ASSERT(!upperAddress); // Upper-address placement not supported by this (generic) algorithm.
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used. Must happen BEFORE suballoc.size is changed below,
    // because the by-size vector is searched using the item's current size.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the original free item is consumed (-1), and each
    // inserted padding item adds one free item back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    // Padding bytes remain free, so only allocSize leaves the free pool.
    m_SumFreeSize -= allocSize;
}
    7239 
    7240 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7241 {
    7242  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7243  suballocItem != m_Suballocations.end();
    7244  ++suballocItem)
    7245  {
    7246  VmaSuballocation& suballoc = *suballocItem;
    7247  if(suballoc.hAllocation == allocation)
    7248  {
    7249  FreeSuballocation(suballocItem);
    7250  VMA_HEAVY_ASSERT(Validate());
    7251  return;
    7252  }
    7253  }
    7254  VMA_ASSERT(0 && "Not found!");
    7255 }
    7256 
    7257 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7258 {
    7259  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7260  suballocItem != m_Suballocations.end();
    7261  ++suballocItem)
    7262  {
    7263  VmaSuballocation& suballoc = *suballocItem;
    7264  if(suballoc.offset == offset)
    7265  {
    7266  FreeSuballocation(suballocItem);
    7267  return;
    7268  }
    7269  }
    7270  VMA_ASSERT(0 && "Not found!");
    7271 }
    7272 
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    // Tries to resize the existing allocation `alloc` in place to `newSize`.
    // Shrinking always succeeds: freed bytes become (or join) a free item
    // directly after the allocation. Growing succeeds only if a free item
    // directly follows with enough space. Returns false if growth is not
    // possible; asserts if `alloc` is not found in this block.
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister before changing its size: the by-size
                        // vector is searched by the item's current size.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7399 
    7400 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7401 {
    7402  VkDeviceSize lastSize = 0;
    7403  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7404  {
    7405  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7406 
    7407  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7408  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7409  VMA_VALIDATE(it->size >= lastSize);
    7410  lastSize = it->size;
    7411  }
    7412  return true;
    7413 }
    7414 
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    // Checks whether an allocation of allocSize/allocAlignment/allocType can
    // be placed starting inside the suballocation at suballocItem, honoring
    // VMA_DEBUG_MARGIN and bufferImageGranularity.
    // - canMakeOtherLost == false: suballocItem must itself be a free item
    //   big enough; on success fills *pOffset and *pSumFreeSize.
    // - canMakeOtherLost == true: may span multiple consecutive items,
    //   counting used-but-losable allocations into *itemsToMakeLostCount and
    //   their bytes into *pSumItemSize.
    // Returns true and fills *pOffset on success; false if placement here is
    // impossible.
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // A used item can serve as the starting point only if its
            // allocation can be made lost and is older than frameInUseCount.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Pushing to the next granularity page resolves the conflict.
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    // Every used item the request spans must also be losable.
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple path: the candidate item must be free and must fit the
        // request entirely by itself.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7688 
    7689 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7690 {
    7691  VMA_ASSERT(item != m_Suballocations.end());
    7692  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7693 
    7694  VmaSuballocationList::iterator nextItem = item;
    7695  ++nextItem;
    7696  VMA_ASSERT(nextItem != m_Suballocations.end());
    7697  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7698 
    7699  item->size += nextItem->size;
    7700  --m_FreeCount;
    7701  m_Suballocations.erase(nextItem);
    7702 }
    7703 
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Turns the given suballocation into a free one, merges it with adjacent
    // free neighbors, keeps m_FreeSuballocationsBySize consistent, and
    // returns an iterator to the resulting (possibly enlarged) free item.

    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // nextItem must leave the by-size vector BEFORE the merge erases it
        // from the list, while its iterator and size are still valid.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // Same ordering constraint: unregister prevItem under its old size,
        // merge (prevItem absorbs suballocItem), then re-register it under
        // the new, larger size.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7755 
    7756 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7757 {
    7758  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7759  VMA_ASSERT(item->size > 0);
    7760 
    7761  // You may want to enable this validation at the beginning or at the end of
    7762  // this function, depending on what do you want to check.
    7763  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7764 
    7765  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7766  {
    7767  if(m_FreeSuballocationsBySize.empty())
    7768  {
    7769  m_FreeSuballocationsBySize.push_back(item);
    7770  }
    7771  else
    7772  {
    7773  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7774  }
    7775  }
    7776 
    7777  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7778 }
    7779 
    7780 
    7781 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    7782 {
    7783  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7784  VMA_ASSERT(item->size > 0);
    7785 
    7786  // You may want to enable this validation at the beginning or at the end of
    7787  // this function, depending on what do you want to check.
    7788  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7789 
    7790  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7791  {
    7792  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7793  m_FreeSuballocationsBySize.data(),
    7794  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    7795  item,
    7796  VmaSuballocationItemSizeLess());
    7797  for(size_t index = it - m_FreeSuballocationsBySize.data();
    7798  index < m_FreeSuballocationsBySize.size();
    7799  ++index)
    7800  {
    7801  if(m_FreeSuballocationsBySize[index] == item)
    7802  {
    7803  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    7804  return;
    7805  }
    7806  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    7807  }
    7808  VMA_ASSERT(0 && "Not found.");
    7809  }
    7810 
    7811  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7812 }
    7813 
    7815 // class VmaBlockMetadata_Linear
    7816 
// Constructs empty linear block metadata: both suballocation vectors use the
// allocator's CPU allocation callbacks, vector 0 initially plays the role of
// the 1st vector, and the 2nd vector starts unused (SECOND_VECTOR_EMPTY).
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7829 
// Nothing to release explicitly: member vectors clean up via their allocators.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7833 
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    // Initialize base-class bookkeeping and mark the whole block as free.
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7839 
bool VmaBlockMetadata_Linear::Validate() const
{
    // Consistency check of the linear metadata invariants: the 2nd-vector
    // mode matches its emptiness, null-item counters match actual null items,
    // offsets are monotonically increasing with VMA_DEBUG_MARGIN gaps, every
    // used item agrees with its VmaAllocation, and m_SumFreeSize is exact.
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode, the 2nd vector occupies the lower part of the
    // block, so it is walked first (forward).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading items of the 1st vector up to m_1stNullItemsBeginCount must all
    // be null (freed) placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): given the loop starts at i == m_1stNullItemsBeginCount,
        // the left side of this disjunction is always true, making the check a
        // no-op — confirm the intended condition.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode, the 2nd vector grows downward from the end of the
    // block, so it is walked in reverse to keep offsets increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7966 
    7967 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7968 {
    7969  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7970  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7971 }
    7972 
    7973 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7974 {
    7975  const VkDeviceSize size = GetSize();
    7976 
    7977  /*
    7978  We don't consider gaps inside allocation vectors with freed allocations because
    7979  they are not suitable for reuse in linear allocator. We consider only space that
    7980  is available for new allocations.
    7981  */
    7982  if(IsEmpty())
    7983  {
    7984  return size;
    7985  }
    7986 
    7987  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7988 
    7989  switch(m_2ndVectorMode)
    7990  {
    7991  case SECOND_VECTOR_EMPTY:
    7992  /*
    7993  Available space is after end of 1st, as well as before beginning of 1st (which
    7994  whould make it a ring buffer).
    7995  */
    7996  {
    7997  const size_t suballocations1stCount = suballocations1st.size();
    7998  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    7999  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8000  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8001  return VMA_MAX(
    8002  firstSuballoc.offset,
    8003  size - (lastSuballoc.offset + lastSuballoc.size));
    8004  }
    8005  break;
    8006 
    8007  case SECOND_VECTOR_RING_BUFFER:
    8008  /*
    8009  Available space is only between end of 2nd and beginning of 1st.
    8010  */
    8011  {
    8012  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8013  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8014  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8015  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8016  }
    8017  break;
    8018 
    8019  case SECOND_VECTOR_DOUBLE_STACK:
    8020  /*
    8021  Available space is only between end of 1st and top of 2nd.
    8022  */
    8023  {
    8024  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8025  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8026  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8027  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8028  }
    8029  break;
    8030 
    8031  default:
    8032  VMA_ASSERT(0);
    8033  return 0;
    8034  }
    8035 }
    8036 
    8037 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8038 {
    8039  const VkDeviceSize size = GetSize();
    8040  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8041  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8042  const size_t suballoc1stCount = suballocations1st.size();
    8043  const size_t suballoc2ndCount = suballocations2nd.size();
    8044 
    8045  outInfo.blockCount = 1;
    8046  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8047  outInfo.unusedRangeCount = 0;
    8048  outInfo.usedBytes = 0;
    8049  outInfo.allocationSizeMin = UINT64_MAX;
    8050  outInfo.allocationSizeMax = 0;
    8051  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8052  outInfo.unusedRangeSizeMax = 0;
    8053 
    8054  VkDeviceSize lastOffset = 0;
    8055 
    8056  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8057  {
    8058  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8059  size_t nextAlloc2ndIndex = 0;
    8060  while(lastOffset < freeSpace2ndTo1stEnd)
    8061  {
    8062  // Find next non-null allocation or move nextAllocIndex to the end.
    8063  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8064  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8065  {
    8066  ++nextAlloc2ndIndex;
    8067  }
    8068 
    8069  // Found non-null allocation.
    8070  if(nextAlloc2ndIndex < suballoc2ndCount)
    8071  {
    8072  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8073 
    8074  // 1. Process free space before this allocation.
    8075  if(lastOffset < suballoc.offset)
    8076  {
    8077  // There is free space from lastOffset to suballoc.offset.
    8078  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8079  ++outInfo.unusedRangeCount;
    8080  outInfo.unusedBytes += unusedRangeSize;
    8081  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8082  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8083  }
    8084 
    8085  // 2. Process this allocation.
    8086  // There is allocation with suballoc.offset, suballoc.size.
    8087  outInfo.usedBytes += suballoc.size;
    8088  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8089  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8090 
    8091  // 3. Prepare for next iteration.
    8092  lastOffset = suballoc.offset + suballoc.size;
    8093  ++nextAlloc2ndIndex;
    8094  }
    8095  // We are at the end.
    8096  else
    8097  {
    8098  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8099  if(lastOffset < freeSpace2ndTo1stEnd)
    8100  {
    8101  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8102  ++outInfo.unusedRangeCount;
    8103  outInfo.unusedBytes += unusedRangeSize;
    8104  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8105  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8106  }
    8107 
    8108  // End of loop.
    8109  lastOffset = freeSpace2ndTo1stEnd;
    8110  }
    8111  }
    8112  }
    8113 
    8114  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8115  const VkDeviceSize freeSpace1stTo2ndEnd =
    8116  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8117  while(lastOffset < freeSpace1stTo2ndEnd)
    8118  {
    8119  // Find next non-null allocation or move nextAllocIndex to the end.
    8120  while(nextAlloc1stIndex < suballoc1stCount &&
    8121  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8122  {
    8123  ++nextAlloc1stIndex;
    8124  }
    8125 
    8126  // Found non-null allocation.
    8127  if(nextAlloc1stIndex < suballoc1stCount)
    8128  {
    8129  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8130 
    8131  // 1. Process free space before this allocation.
    8132  if(lastOffset < suballoc.offset)
    8133  {
    8134  // There is free space from lastOffset to suballoc.offset.
    8135  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8136  ++outInfo.unusedRangeCount;
    8137  outInfo.unusedBytes += unusedRangeSize;
    8138  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8139  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8140  }
    8141 
    8142  // 2. Process this allocation.
    8143  // There is allocation with suballoc.offset, suballoc.size.
    8144  outInfo.usedBytes += suballoc.size;
    8145  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8146  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8147 
    8148  // 3. Prepare for next iteration.
    8149  lastOffset = suballoc.offset + suballoc.size;
    8150  ++nextAlloc1stIndex;
    8151  }
    8152  // We are at the end.
    8153  else
    8154  {
    8155  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8156  if(lastOffset < freeSpace1stTo2ndEnd)
    8157  {
    8158  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8159  ++outInfo.unusedRangeCount;
    8160  outInfo.unusedBytes += unusedRangeSize;
    8161  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8162  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8163  }
    8164 
    8165  // End of loop.
    8166  lastOffset = freeSpace1stTo2ndEnd;
    8167  }
    8168  }
    8169 
    8170  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8171  {
    8172  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8173  while(lastOffset < size)
    8174  {
    8175  // Find next non-null allocation or move nextAllocIndex to the end.
    8176  while(nextAlloc2ndIndex != SIZE_MAX &&
    8177  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8178  {
    8179  --nextAlloc2ndIndex;
    8180  }
    8181 
    8182  // Found non-null allocation.
    8183  if(nextAlloc2ndIndex != SIZE_MAX)
    8184  {
    8185  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8186 
    8187  // 1. Process free space before this allocation.
    8188  if(lastOffset < suballoc.offset)
    8189  {
    8190  // There is free space from lastOffset to suballoc.offset.
    8191  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8192  ++outInfo.unusedRangeCount;
    8193  outInfo.unusedBytes += unusedRangeSize;
    8194  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8195  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8196  }
    8197 
    8198  // 2. Process this allocation.
    8199  // There is allocation with suballoc.offset, suballoc.size.
    8200  outInfo.usedBytes += suballoc.size;
    8201  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8202  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8203 
    8204  // 3. Prepare for next iteration.
    8205  lastOffset = suballoc.offset + suballoc.size;
    8206  --nextAlloc2ndIndex;
    8207  }
    8208  // We are at the end.
    8209  else
    8210  {
    8211  // There is free space from lastOffset to size.
    8212  if(lastOffset < size)
    8213  {
    8214  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8215  ++outInfo.unusedRangeCount;
    8216  outInfo.unusedBytes += unusedRangeSize;
    8217  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8218  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8219  }
    8220 
    8221  // End of loop.
    8222  lastOffset = size;
    8223  }
    8224  }
    8225  }
    8226 
    8227  outInfo.unusedBytes = size - outInfo.usedBytes;
    8228 }
    8229 
    8230 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8231 {
    8232  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8233  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8234  const VkDeviceSize size = GetSize();
    8235  const size_t suballoc1stCount = suballocations1st.size();
    8236  const size_t suballoc2ndCount = suballocations2nd.size();
    8237 
    8238  inoutStats.size += size;
    8239 
    8240  VkDeviceSize lastOffset = 0;
    8241 
    8242  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8243  {
    8244  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8245  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8246  while(lastOffset < freeSpace2ndTo1stEnd)
    8247  {
    8248  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8249  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8250  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8251  {
    8252  ++nextAlloc2ndIndex;
    8253  }
    8254 
    8255  // Found non-null allocation.
    8256  if(nextAlloc2ndIndex < suballoc2ndCount)
    8257  {
    8258  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8259 
    8260  // 1. Process free space before this allocation.
    8261  if(lastOffset < suballoc.offset)
    8262  {
    8263  // There is free space from lastOffset to suballoc.offset.
    8264  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8265  inoutStats.unusedSize += unusedRangeSize;
    8266  ++inoutStats.unusedRangeCount;
    8267  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8268  }
    8269 
    8270  // 2. Process this allocation.
    8271  // There is allocation with suballoc.offset, suballoc.size.
    8272  ++inoutStats.allocationCount;
    8273 
    8274  // 3. Prepare for next iteration.
    8275  lastOffset = suballoc.offset + suballoc.size;
    8276  ++nextAlloc2ndIndex;
    8277  }
    8278  // We are at the end.
    8279  else
    8280  {
    8281  if(lastOffset < freeSpace2ndTo1stEnd)
    8282  {
    8283  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8284  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8285  inoutStats.unusedSize += unusedRangeSize;
    8286  ++inoutStats.unusedRangeCount;
    8287  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8288  }
    8289 
    8290  // End of loop.
    8291  lastOffset = freeSpace2ndTo1stEnd;
    8292  }
    8293  }
    8294  }
    8295 
    8296  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8297  const VkDeviceSize freeSpace1stTo2ndEnd =
    8298  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8299  while(lastOffset < freeSpace1stTo2ndEnd)
    8300  {
    8301  // Find next non-null allocation or move nextAllocIndex to the end.
    8302  while(nextAlloc1stIndex < suballoc1stCount &&
    8303  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8304  {
    8305  ++nextAlloc1stIndex;
    8306  }
    8307 
    8308  // Found non-null allocation.
    8309  if(nextAlloc1stIndex < suballoc1stCount)
    8310  {
    8311  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8312 
    8313  // 1. Process free space before this allocation.
    8314  if(lastOffset < suballoc.offset)
    8315  {
    8316  // There is free space from lastOffset to suballoc.offset.
    8317  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8318  inoutStats.unusedSize += unusedRangeSize;
    8319  ++inoutStats.unusedRangeCount;
    8320  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8321  }
    8322 
    8323  // 2. Process this allocation.
    8324  // There is allocation with suballoc.offset, suballoc.size.
    8325  ++inoutStats.allocationCount;
    8326 
    8327  // 3. Prepare for next iteration.
    8328  lastOffset = suballoc.offset + suballoc.size;
    8329  ++nextAlloc1stIndex;
    8330  }
    8331  // We are at the end.
    8332  else
    8333  {
    8334  if(lastOffset < freeSpace1stTo2ndEnd)
    8335  {
    8336  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8337  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8338  inoutStats.unusedSize += unusedRangeSize;
    8339  ++inoutStats.unusedRangeCount;
    8340  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8341  }
    8342 
    8343  // End of loop.
    8344  lastOffset = freeSpace1stTo2ndEnd;
    8345  }
    8346  }
    8347 
    8348  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8349  {
    8350  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8351  while(lastOffset < size)
    8352  {
    8353  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8354  while(nextAlloc2ndIndex != SIZE_MAX &&
    8355  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8356  {
    8357  --nextAlloc2ndIndex;
    8358  }
    8359 
    8360  // Found non-null allocation.
    8361  if(nextAlloc2ndIndex != SIZE_MAX)
    8362  {
    8363  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8364 
    8365  // 1. Process free space before this allocation.
    8366  if(lastOffset < suballoc.offset)
    8367  {
    8368  // There is free space from lastOffset to suballoc.offset.
    8369  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8370  inoutStats.unusedSize += unusedRangeSize;
    8371  ++inoutStats.unusedRangeCount;
    8372  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8373  }
    8374 
    8375  // 2. Process this allocation.
    8376  // There is allocation with suballoc.offset, suballoc.size.
    8377  ++inoutStats.allocationCount;
    8378 
    8379  // 3. Prepare for next iteration.
    8380  lastOffset = suballoc.offset + suballoc.size;
    8381  --nextAlloc2ndIndex;
    8382  }
    8383  // We are at the end.
    8384  else
    8385  {
    8386  if(lastOffset < size)
    8387  {
    8388  // There is free space from lastOffset to size.
    8389  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8390  inoutStats.unusedSize += unusedRangeSize;
    8391  ++inoutStats.unusedRangeCount;
    8392  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8393  }
    8394 
    8395  // End of loop.
    8396  lastOffset = size;
    8397  }
    8398  }
    8399  }
    8400 }
    8401 
    8402 #if VMA_STATS_STRING_ENABLED
    8403 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8404 {
    8405  const VkDeviceSize size = GetSize();
    8406  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8407  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8408  const size_t suballoc1stCount = suballocations1st.size();
    8409  const size_t suballoc2ndCount = suballocations2nd.size();
    8410 
    8411  // FIRST PASS
    8412 
    8413  size_t unusedRangeCount = 0;
    8414  VkDeviceSize usedBytes = 0;
    8415 
    8416  VkDeviceSize lastOffset = 0;
    8417 
    8418  size_t alloc2ndCount = 0;
    8419  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8420  {
    8421  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8422  size_t nextAlloc2ndIndex = 0;
    8423  while(lastOffset < freeSpace2ndTo1stEnd)
    8424  {
    8425  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8426  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8427  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8428  {
    8429  ++nextAlloc2ndIndex;
    8430  }
    8431 
    8432  // Found non-null allocation.
    8433  if(nextAlloc2ndIndex < suballoc2ndCount)
    8434  {
    8435  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8436 
    8437  // 1. Process free space before this allocation.
    8438  if(lastOffset < suballoc.offset)
    8439  {
    8440  // There is free space from lastOffset to suballoc.offset.
    8441  ++unusedRangeCount;
    8442  }
    8443 
    8444  // 2. Process this allocation.
    8445  // There is allocation with suballoc.offset, suballoc.size.
    8446  ++alloc2ndCount;
    8447  usedBytes += suballoc.size;
    8448 
    8449  // 3. Prepare for next iteration.
    8450  lastOffset = suballoc.offset + suballoc.size;
    8451  ++nextAlloc2ndIndex;
    8452  }
    8453  // We are at the end.
    8454  else
    8455  {
    8456  if(lastOffset < freeSpace2ndTo1stEnd)
    8457  {
    8458  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8459  ++unusedRangeCount;
    8460  }
    8461 
    8462  // End of loop.
    8463  lastOffset = freeSpace2ndTo1stEnd;
    8464  }
    8465  }
    8466  }
    8467 
    8468  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8469  size_t alloc1stCount = 0;
    8470  const VkDeviceSize freeSpace1stTo2ndEnd =
    8471  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8472  while(lastOffset < freeSpace1stTo2ndEnd)
    8473  {
    8474  // Find next non-null allocation or move nextAllocIndex to the end.
    8475  while(nextAlloc1stIndex < suballoc1stCount &&
    8476  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8477  {
    8478  ++nextAlloc1stIndex;
    8479  }
    8480 
    8481  // Found non-null allocation.
    8482  if(nextAlloc1stIndex < suballoc1stCount)
    8483  {
    8484  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8485 
    8486  // 1. Process free space before this allocation.
    8487  if(lastOffset < suballoc.offset)
    8488  {
    8489  // There is free space from lastOffset to suballoc.offset.
    8490  ++unusedRangeCount;
    8491  }
    8492 
    8493  // 2. Process this allocation.
    8494  // There is allocation with suballoc.offset, suballoc.size.
    8495  ++alloc1stCount;
    8496  usedBytes += suballoc.size;
    8497 
    8498  // 3. Prepare for next iteration.
    8499  lastOffset = suballoc.offset + suballoc.size;
    8500  ++nextAlloc1stIndex;
    8501  }
    8502  // We are at the end.
    8503  else
    8504  {
    8505  if(lastOffset < size)
    8506  {
    8507  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8508  ++unusedRangeCount;
    8509  }
    8510 
    8511  // End of loop.
    8512  lastOffset = freeSpace1stTo2ndEnd;
    8513  }
    8514  }
    8515 
    8516  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8517  {
    8518  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8519  while(lastOffset < size)
    8520  {
    8521  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8522  while(nextAlloc2ndIndex != SIZE_MAX &&
    8523  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8524  {
    8525  --nextAlloc2ndIndex;
    8526  }
    8527 
    8528  // Found non-null allocation.
    8529  if(nextAlloc2ndIndex != SIZE_MAX)
    8530  {
    8531  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8532 
    8533  // 1. Process free space before this allocation.
    8534  if(lastOffset < suballoc.offset)
    8535  {
    8536  // There is free space from lastOffset to suballoc.offset.
    8537  ++unusedRangeCount;
    8538  }
    8539 
    8540  // 2. Process this allocation.
    8541  // There is allocation with suballoc.offset, suballoc.size.
    8542  ++alloc2ndCount;
    8543  usedBytes += suballoc.size;
    8544 
    8545  // 3. Prepare for next iteration.
    8546  lastOffset = suballoc.offset + suballoc.size;
    8547  --nextAlloc2ndIndex;
    8548  }
    8549  // We are at the end.
    8550  else
    8551  {
    8552  if(lastOffset < size)
    8553  {
    8554  // There is free space from lastOffset to size.
    8555  ++unusedRangeCount;
    8556  }
    8557 
    8558  // End of loop.
    8559  lastOffset = size;
    8560  }
    8561  }
    8562  }
    8563 
    8564  const VkDeviceSize unusedBytes = size - usedBytes;
    8565  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8566 
    8567  // SECOND PASS
    8568  lastOffset = 0;
    8569 
    8570  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8571  {
    8572  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8573  size_t nextAlloc2ndIndex = 0;
    8574  while(lastOffset < freeSpace2ndTo1stEnd)
    8575  {
    8576  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8577  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8578  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8579  {
    8580  ++nextAlloc2ndIndex;
    8581  }
    8582 
    8583  // Found non-null allocation.
    8584  if(nextAlloc2ndIndex < suballoc2ndCount)
    8585  {
    8586  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8587 
    8588  // 1. Process free space before this allocation.
    8589  if(lastOffset < suballoc.offset)
    8590  {
    8591  // There is free space from lastOffset to suballoc.offset.
    8592  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8593  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8594  }
    8595 
    8596  // 2. Process this allocation.
    8597  // There is allocation with suballoc.offset, suballoc.size.
    8598  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8599 
    8600  // 3. Prepare for next iteration.
    8601  lastOffset = suballoc.offset + suballoc.size;
    8602  ++nextAlloc2ndIndex;
    8603  }
    8604  // We are at the end.
    8605  else
    8606  {
    8607  if(lastOffset < freeSpace2ndTo1stEnd)
    8608  {
    8609  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8610  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8611  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8612  }
    8613 
    8614  // End of loop.
    8615  lastOffset = freeSpace2ndTo1stEnd;
    8616  }
    8617  }
    8618  }
    8619 
    8620  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8621  while(lastOffset < freeSpace1stTo2ndEnd)
    8622  {
    8623  // Find next non-null allocation or move nextAllocIndex to the end.
    8624  while(nextAlloc1stIndex < suballoc1stCount &&
    8625  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8626  {
    8627  ++nextAlloc1stIndex;
    8628  }
    8629 
    8630  // Found non-null allocation.
    8631  if(nextAlloc1stIndex < suballoc1stCount)
    8632  {
    8633  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8634 
    8635  // 1. Process free space before this allocation.
    8636  if(lastOffset < suballoc.offset)
    8637  {
    8638  // There is free space from lastOffset to suballoc.offset.
    8639  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8640  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8641  }
    8642 
    8643  // 2. Process this allocation.
    8644  // There is allocation with suballoc.offset, suballoc.size.
    8645  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8646 
    8647  // 3. Prepare for next iteration.
    8648  lastOffset = suballoc.offset + suballoc.size;
    8649  ++nextAlloc1stIndex;
    8650  }
    8651  // We are at the end.
    8652  else
    8653  {
    8654  if(lastOffset < freeSpace1stTo2ndEnd)
    8655  {
    8656  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8657  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8658  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8659  }
    8660 
    8661  // End of loop.
    8662  lastOffset = freeSpace1stTo2ndEnd;
    8663  }
    8664  }
    8665 
    8666  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8667  {
    8668  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8669  while(lastOffset < size)
    8670  {
    8671  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8672  while(nextAlloc2ndIndex != SIZE_MAX &&
    8673  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8674  {
    8675  --nextAlloc2ndIndex;
    8676  }
    8677 
    8678  // Found non-null allocation.
    8679  if(nextAlloc2ndIndex != SIZE_MAX)
    8680  {
    8681  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8682 
    8683  // 1. Process free space before this allocation.
    8684  if(lastOffset < suballoc.offset)
    8685  {
    8686  // There is free space from lastOffset to suballoc.offset.
    8687  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8688  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8689  }
    8690 
    8691  // 2. Process this allocation.
    8692  // There is allocation with suballoc.offset, suballoc.size.
    8693  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8694 
    8695  // 3. Prepare for next iteration.
    8696  lastOffset = suballoc.offset + suballoc.size;
    8697  --nextAlloc2ndIndex;
    8698  }
    8699  // We are at the end.
    8700  else
    8701  {
    8702  if(lastOffset < size)
    8703  {
    8704  // There is free space from lastOffset to size.
    8705  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8706  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8707  }
    8708 
    8709  // End of loop.
    8710  lastOffset = size;
    8711  }
    8712  }
    8713  }
    8714 
    8715  PrintDetailedMap_End(json);
    8716 }
    8717 #endif // #if VMA_STATS_STRING_ENABLED
    8718 
/*
Tries to find space for a new allocation inside this block using the linear
algorithm. On success fills *pAllocationRequest and returns true; returns false
if the allocation cannot fit (possibly after making other allocations lost).

Three placement strategies, chosen by current state:
1. upperAddress: push onto the "upper stack" - allocate downwards from the end
   of the block (2nd vector used as a stack). Only valid when the 2nd vector is
   not already used as a ring buffer.
2. !upperAddress, 2nd vector EMPTY or DOUBLE_STACK: append at the end of the
   1st vector (growing upwards).
3. !upperAddress, 2nd vector EMPTY or RING_BUFFER: wrap around and allocate at
   the end of the 2nd vector, before the beginning of the 1st vector,
   optionally making colliding 1st-vector allocations lost.

Parameters:
- currentFrameIndex/frameInUseCount: used to decide whether colliding
  allocations can be made lost.
- bufferImageGranularity: if > 1, buffer and image suballocations on the same
  "page" of this size must not conflict (Vulkan bufferImageGranularity rule).
- allocSize/allocAlignment/allocType: properties of the requested allocation.
- canMakeOtherLost: whether path 3 may mark existing allocations as lost.
- strategy: unused by the linear algorithm (placement is fully determined).
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Upper stack and ring buffer both live in the 2nd vector - they are
        // mutually exclusive usages of this block.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            // Would underflow below offset 0 - does not fit under the stack top.
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        // Growing downwards, so align the offset DOWN.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // Free space ends either at the bottom of the upper stack
            // (double-stack mode) or at the end of the whole block.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Walk 1st-vector items that collide with [resultOffset, resultOffset+allocSize)
                // and count those that can be made lost; fail on the first one that cannot.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    9091 
/*
Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount
(as prepared by CreateAllocationRequest with canMakeOtherLost == true).
Scans the 1st vector from the first non-null item forward, freeing lost items
until the requested count is reached. Returns false if any of them can no
longer be made lost (e.g. its frame index advanced in the meantime).
*/
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Only the ring-buffer wrap-around path produces itemsToMakeLostCount > 0.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Item becomes a free hole in the middle of the 1st vector.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    9136 
    9137 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9138 {
    9139  uint32_t lostAllocationCount = 0;
    9140 
    9141  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9142  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9143  {
    9144  VmaSuballocation& suballoc = suballocations1st[i];
    9145  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9146  suballoc.hAllocation->CanBecomeLost() &&
    9147  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9148  {
    9149  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9150  suballoc.hAllocation = VK_NULL_HANDLE;
    9151  ++m_1stNullItemsMiddleCount;
    9152  m_SumFreeSize += suballoc.size;
    9153  ++lostAllocationCount;
    9154  }
    9155  }
    9156 
    9157  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9158  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9159  {
    9160  VmaSuballocation& suballoc = suballocations2nd[i];
    9161  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9162  suballoc.hAllocation->CanBecomeLost() &&
    9163  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9164  {
    9165  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9166  suballoc.hAllocation = VK_NULL_HANDLE;
    9167  ++m_2ndNullItemsCount;
    9168  ++lostAllocationCount;
    9169  }
    9170  }
    9171 
    9172  if(lostAllocationCount)
    9173  {
    9174  CleanupAfterFree();
    9175  }
    9176 
    9177  return lostAllocationCount;
    9178 }
    9179 
    9180 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9181 {
    9182  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9183  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9184  {
    9185  const VmaSuballocation& suballoc = suballocations1st[i];
    9186  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9187  {
    9188  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9189  {
    9190  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9191  return VK_ERROR_VALIDATION_FAILED_EXT;
    9192  }
    9193  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9194  {
    9195  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9196  return VK_ERROR_VALIDATION_FAILED_EXT;
    9197  }
    9198  }
    9199  }
    9200 
    9201  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9202  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9203  {
    9204  const VmaSuballocation& suballoc = suballocations2nd[i];
    9205  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9206  {
    9207  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9208  {
    9209  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9210  return VK_ERROR_VALIDATION_FAILED_EXT;
    9211  }
    9212  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9213  {
    9214  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9215  return VK_ERROR_VALIDATION_FAILED_EXT;
    9216  }
    9217  }
    9218  }
    9219 
    9220  return VK_SUCCESS;
    9221 }
    9222 
/*
Commits an allocation at the place previously found by CreateAllocationRequest.
Pushes the new suballocation onto the 2nd vector (upper stack) when
upperAddress, otherwise onto the end of the 1st vector or, on ring-buffer
wrap-around, onto the 2nd vector - switching m_2ndVectorMode accordingly.
*/
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper stack and ring buffer are mutually exclusive usages of the 2nd vector.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The request does not match any valid placement - should be
                // impossible if it came from CreateAllocationRequest.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9292 
    9293 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9294 {
    9295  FreeAtOffset(allocation->GetOffset());
    9296 }
    9297 
/*
Frees the suballocation at the given offset. Tries the cheap cases first
(first item of 1st vector, last item of the active vector), then falls back to
binary search in the middle of either vector. Each successful path updates
m_SumFreeSize and the null-item counters and runs CleanupAfterFree() to keep
the invariants (compaction, vector swap, mode reset).
*/
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 1st vector is sorted by ascending offset, so binary search applies.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Mark as a free hole; CleanupAfterFree decides whether to compact.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 2nd vector is sorted ascending in ring-buffer mode but descending in
        // double-stack mode, hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9386 
    9387 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9388 {
    9389  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9390  const size_t suballocCount = AccessSuballocations1st().size();
    9391  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9392 }
    9393 
/*
Restores the metadata invariants after a free or a made-lost operation:
- If the block became completely empty, resets everything to the initial state.
- Otherwise trims null items from the edges of both vectors, optionally
  compacts the 1st vector (ShouldCompact1st), and if the 1st vector drained
  completely while a ring buffer exists, promotes the 2nd vector to become the
  new 1st (vector swap via m_1stVectorIndex).
*/
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // Null items that were "middle" become "begin" as the prefix grows.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all live items to the front, dropping every null hole.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Null items at the front of the (new) 1st vector become "begin" items.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flips which internal vector AccessSuballocations1st() returns.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9490 
    9491 
    9493 // class VmaBlockMetadata_Buddy
    9494 
// Constructs empty buddy metadata. The tree root is created later in Init();
// m_FreeCount starts at 1 because the whole (future) block counts as one free
// range. Free lists are zero-initialized - assumes FreeList holds only raw
// pointers so memset(0) is equivalent to null - TODO confirm.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9504 
// Recursively deletes the whole buddy tree starting from the root.
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    DeleteNode(m_Root);
}
    9509 
/*
Initializes buddy metadata for a block of the given size. The buddy algorithm
manages only the largest power-of-2 region that fits; the remainder up to
`size` is unusable. Builds the single free root node covering level 0.
*/
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    // Round down to power of 2; the tail [m_UsableSize, size) stays unusable.
    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount.
    // Must run after m_UsableSize is set - LevelToNodeSize presumably derives
    // node sizes from it - TODO confirm.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // Single free node spanning the whole usable region.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    9534 
/*
Consistency check of the buddy metadata: validates the node tree recursively,
cross-checks the counters gathered during the walk against the cached members,
and verifies that every free list is a well-formed doubly-linked list of free
nodes. Returns false (via VMA_VALIDATE) on the first violation.
*/
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // Head of each list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                // Tail of the list must be the recorded back pointer.
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                // Forward and backward links must agree.
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9577 
    9578 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9579 {
    9580  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9581  {
    9582  if(m_FreeList[level].front != VMA_NULL)
    9583  {
    9584  return LevelToNodeSize(level);
    9585  }
    9586  }
    9587  return 0;
    9588 }
    9589 
    9590 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9591 {
    9592  const VkDeviceSize unusableSize = GetUnusableSize();
    9593 
    9594  outInfo.blockCount = 1;
    9595 
    9596  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9597  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9598 
    9599  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9600  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9601  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9602 
    9603  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9604 
    9605  if(unusableSize > 0)
    9606  {
    9607  ++outInfo.unusedRangeCount;
    9608  outInfo.unusedBytes += unusableSize;
    9609  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9610  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9611  }
    9612 }
    9613 
    9614 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9615 {
    9616  const VkDeviceSize unusableSize = GetUnusableSize();
    9617 
    9618  inoutStats.size += GetSize();
    9619  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9620  inoutStats.allocationCount += m_AllocationCount;
    9621  inoutStats.unusedRangeCount += m_FreeCount;
    9622  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9623 
    9624  if(unusableSize > 0)
    9625  {
    9626  ++inoutStats.unusedRangeCount;
    9627  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9628  }
    9629 }
    9630 
    9631 #if VMA_STATS_STRING_ENABLED
    9632 
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // Statistics are recomputed from scratch here just to fill the JSON header.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    // Dump the whole node tree recursively, starting from the root.
    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    // The unusable tail (if any) is reported as a trailing unused range.
    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9657 
    9658 #endif // #if VMA_STATS_STRING_ENABLED
    9659 
// Searches the free lists for a node that can hold the requested allocation.
// On success fills *pAllocationRequest (the found level is stashed in
// customData for Alloc() to consume) and returns true.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    // A request larger than the whole usable block can never succeed.
    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Search free lists from targetLevel (smallest node size that fits) up
    // towards the root. Note the `level--` in the condition: the loop visits
    // targetLevel, targetLevel-1, ..., 0.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            // Accept the first free node whose offset satisfies the alignment.
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Remember which level the node was found at; Alloc() reads this.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9710 
    9711 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9712  uint32_t currentFrameIndex,
    9713  uint32_t frameInUseCount,
    9714  VmaAllocationRequest* pAllocationRequest)
    9715 {
    9716  /*
    9717  Lost allocations are not supported in buddy allocator at the moment.
    9718  Support might be added in the future.
    9719  */
    9720  return pAllocationRequest->itemsToMakeLostCount == 0;
    9721 }
    9722 
    9723 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9724 {
    9725  /*
    9726  Lost allocations are not supported in buddy allocator at the moment.
    9727  Support might be added in the future.
    9728  */
    9729  return 0;
    9730 }
    9731 
// Commits the allocation described by `request` (previously produced by
// CreateAllocationRequest): splits free nodes down to the target level and
// converts the final node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // Level at which CreateAllocationRequest() found the node (stashed in customData).
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node with the requested offset in that level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // (Right is pushed first so the left child ends up at the list front,
        // which the loop below relies on.)
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // One node became two: net +1 free node.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    // Update cached counters mirrored by Validate()/stats.
    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9806 
    9807 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9808 {
    9809  if(node->type == Node::TYPE_SPLIT)
    9810  {
    9811  DeleteNode(node->split.leftChild->buddy);
    9812  DeleteNode(node->split.leftChild);
    9813  }
    9814 
    9815  vma_delete(GetAllocationCallbacks(), node);
    9816 }
    9817 
// Recursively checks structural invariants of `curr` and its subtree,
// accumulating free/allocation totals into ctx for comparison against the
// cached counters. NOTE(review): VMA_VALIDATE appears to return false from
// this function on failure - confirm against the macro definition.
bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
{
    VMA_VALIDATE(level < m_LevelCount);
    VMA_VALIDATE(curr->parent == parent);
    // Only the root lacks a buddy; buddy links must be mutual.
    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    switch(curr->type)
    {
    case Node::TYPE_FREE:
        // curr->free.prev, next are validated separately.
        ctx.calculatedSumFreeSize += levelNodeSize;
        ++ctx.calculatedFreeCount;
        break;
    case Node::TYPE_ALLOCATION:
        ++ctx.calculatedAllocationCount;
        // The part of the node not covered by the allocation counts as free.
        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
        break;
    case Node::TYPE_SPLIT:
        {
            const uint32_t childrenLevel = level + 1;
            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
            const Node* const leftChild = curr->split.leftChild;
            VMA_VALIDATE(leftChild != VMA_NULL);
            VMA_VALIDATE(leftChild->offset == curr->offset);
            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for left child failed.");
            }
            const Node* const rightChild = leftChild->buddy;
            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
            {
                VMA_VALIDATE(false && "ValidateNode for right child failed.");
            }
        }
        break;
    default:
        // Unknown node type - tree is corrupted.
        return false;
    }

    return true;
}
    9861 
    9862 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9863 {
    9864  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9865  uint32_t level = 0;
    9866  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9867  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9868  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9869  {
    9870  ++level;
    9871  currLevelNodeSize = nextLevelNodeSize;
    9872  nextLevelNodeSize = currLevelNodeSize >> 1;
    9873  }
    9874  return level;
    9875 }
    9876 
// Frees the allocation at `offset`: locates the leaf node owning that offset,
// marks it free, and merges it with its buddy as far up the tree as possible.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    // Walk down from the root, descending into whichever child's range
    // contains `offset`, until reaching a leaf (non-split) node.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            // Right child = left child's buddy.
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): alloc is dereferenced here even though the assert above
    // tolerates VK_NULL_HANDLE - confirm callers always pass a valid alloc.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    // While the node's buddy is also free, replace both with their parent
    // (which becomes a single free node one level up).
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        // Two free nodes merged into one: net -1 free node.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9927 
    9928 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9929 {
    9930  switch(node->type)
    9931  {
    9932  case Node::TYPE_FREE:
    9933  ++outInfo.unusedRangeCount;
    9934  outInfo.unusedBytes += levelNodeSize;
    9935  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9936  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9937  break;
    9938  case Node::TYPE_ALLOCATION:
    9939  {
    9940  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9941  ++outInfo.allocationCount;
    9942  outInfo.usedBytes += allocSize;
    9943  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9944  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9945 
    9946  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9947  if(unusedRangeSize > 0)
    9948  {
    9949  ++outInfo.unusedRangeCount;
    9950  outInfo.unusedBytes += unusedRangeSize;
    9951  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9952  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9953  }
    9954  }
    9955  break;
    9956  case Node::TYPE_SPLIT:
    9957  {
    9958  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9959  const Node* const leftChild = node->split.leftChild;
    9960  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9961  const Node* const rightChild = leftChild->buddy;
    9962  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9963  }
    9964  break;
    9965  default:
    9966  VMA_ASSERT(0);
    9967  }
    9968 }
    9969 
// Pushes `node` at the front of the doubly-linked free list for `level`.
void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
{
    VMA_ASSERT(node->type == Node::TYPE_FREE);

    // List is empty.
    Node* const frontNode = m_FreeList[level].front;
    if(frontNode == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
        node->free.prev = node->free.next = VMA_NULL;
        // Node becomes both front and back of the one-element list.
        m_FreeList[level].front = m_FreeList[level].back = node;
    }
    else
    {
        VMA_ASSERT(frontNode->free.prev == VMA_NULL);
        // Link in front of the current head; back pointer is unchanged.
        node->free.prev = VMA_NULL;
        node->free.next = frontNode;
        frontNode->free.prev = node;
        m_FreeList[level].front = node;
    }
}
    9991 
// Unlinks `node` from the doubly-linked free list for `level`, fixing up
// the list's front/back pointers when the node was at either end.
void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
{
    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);

    // It is at the front.
    if(node->free.prev == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].front == node);
        m_FreeList[level].front = node->free.next;
    }
    else
    {
        Node* const prevFreeNode = node->free.prev;
        VMA_ASSERT(prevFreeNode->free.next == node);
        prevFreeNode->free.next = node->free.next;
    }

    // It is at the back.
    if(node->free.next == VMA_NULL)
    {
        VMA_ASSERT(m_FreeList[level].back == node);
        m_FreeList[level].back = node->free.prev;
    }
    else
    {
        Node* const nextFreeNode = node->free.next;
        VMA_ASSERT(nextFreeNode->free.prev == node);
        nextFreeNode->free.prev = node->free.prev;
    }
}
    10022 
    10023 #if VMA_STATS_STRING_ENABLED
// Emits JSON entries for this node's subtree, in increasing offset order.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        // The whole node is one unused range.
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            // The tail of the node beyond the allocation's size is unused.
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            // Recurse into both halves (right child = left child's buddy).
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    10054 #endif // #if VMA_STATS_STRING_ENABLED
    10055 
    10056 
    10058 // class VmaDeviceMemoryBlock
    10059 
// Constructs an empty, uninitialized block; real setup happens in Init().
// The hAllocator parameter is currently unused here.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    10069 
    10070 void VmaDeviceMemoryBlock::Init(
    10071  VmaAllocator hAllocator,
    10072  uint32_t newMemoryTypeIndex,
    10073  VkDeviceMemory newMemory,
    10074  VkDeviceSize newSize,
    10075  uint32_t id,
    10076  uint32_t algorithm)
    10077 {
    10078  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10079 
    10080  m_MemoryTypeIndex = newMemoryTypeIndex;
    10081  m_Id = id;
    10082  m_hMemory = newMemory;
    10083 
    10084  switch(algorithm)
    10085  {
    10087  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10088  break;
    10090  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10091  break;
    10092  default:
    10093  VMA_ASSERT(0);
    10094  // Fall-through.
    10095  case 0:
    10096  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10097  }
    10098  m_pMetadata->Init(newSize);
    10099 }
    10100 
// Releases the block's VkDeviceMemory and its metadata object.
// All suballocations must have been freed first.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    // Clear the handle so Init() can assert against re-initialization.
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    10114 
bool VmaDeviceMemoryBlock::Validate() const
{
    // Basic sanity: the block must own memory of non-zero size;
    // detailed consistency checks are delegated to the metadata object.
    VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
        (m_pMetadata->GetSize() != 0));

    return m_pMetadata->Validate();
}
    10122 
    10123 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10124 {
    10125  void* pData = nullptr;
    10126  VkResult res = Map(hAllocator, 1, &pData);
    10127  if(res != VK_SUCCESS)
    10128  {
    10129  return res;
    10130  }
    10131 
    10132  res = m_pMetadata->CheckCorruption(pData);
    10133 
    10134  Unmap(hAllocator, 1);
    10135 
    10136  return res;
    10137 }
    10138 
// Reference-counted mapping: vkMapMemory is called only on the 0 -> N
// transition; subsequent calls bump the counter and reuse the cached pointer.
// ppData may be VMA_NULL if the caller doesn't need the pointer.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Protects m_MapCount/m_pMappedData and serializes vkMapMemory against
    // other map/unmap/bind calls on the same VkDeviceMemory.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: just add references.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the entire memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
    10177 
    10178 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10179 {
    10180  if(count == 0)
    10181  {
    10182  return;
    10183  }
    10184 
    10185  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10186  if(m_MapCount >= count)
    10187  {
    10188  m_MapCount -= count;
    10189  if(m_MapCount == 0)
    10190  {
    10191  m_pMappedData = VMA_NULL;
    10192  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10193  }
    10194  }
    10195  else
    10196  {
    10197  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10198  }
    10199 }
    10200 
    10201 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10202 {
    10203  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10204  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10205 
    10206  void* pData;
    10207  VkResult res = Map(hAllocator, 1, &pData);
    10208  if(res != VK_SUCCESS)
    10209  {
    10210  return res;
    10211  }
    10212 
    10213  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10214  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10215 
    10216  Unmap(hAllocator, 1);
    10217 
    10218  return VK_SUCCESS;
    10219 }
    10220 
// Verifies the magic values written by WriteMagicValueAroundAllocation();
// a mismatch means something wrote outside the allocation's bounds.
VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
{
    VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);

    void* pData;
    VkResult res = Map(hAllocator, 1, &pData);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    // Corruption is reported via assert; the function still returns
    // VK_SUCCESS afterwards (map/unmap completed correctly).
    if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    }
    else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    {
        VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    }

    Unmap(hAllocator, 1);

    return VK_SUCCESS;
}
    10246 
// Binds hBuffer to this block's memory at the allocation's offset.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    // The allocation must actually live inside this block.
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}
    10262 
// Binds hImage to this block's memory at the allocation's offset.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkImage hImage)
{
    // The allocation must actually live inside this block.
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindImageMemory(
        hAllocator->m_hDevice,
        hImage,
        m_hMemory,
        hAllocation->GetOffset());
}
    10278 
    10279 static void InitStatInfo(VmaStatInfo& outInfo)
    10280 {
    10281  memset(&outInfo, 0, sizeof(outInfo));
    10282  outInfo.allocationSizeMin = UINT64_MAX;
    10283  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10284 }
    10285 
    10286 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    10287 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    10288 {
    10289  inoutInfo.blockCount += srcInfo.blockCount;
    10290  inoutInfo.allocationCount += srcInfo.allocationCount;
    10291  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    10292  inoutInfo.usedBytes += srcInfo.usedBytes;
    10293  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    10294  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    10295  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    10296  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    10297  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    10298 }
    10299 
    10300 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10301 {
    10302  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10303  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10304  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10305  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10306 }
    10307 
// Constructs a custom pool: all real work is delegated to the embedded
// block vector, configured from createInfo.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // blockSize == 0 means "use the allocator's preferred default size".
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity of 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10326 
// Intentionally empty: m_BlockVector's destructor destroys all memory blocks.
VmaPool_T::~VmaPool_T()
{
}
    10330 
    10331 #if VMA_STATS_STRING_ENABLED
    10332 
    10333 #endif // #if VMA_STATS_STRING_ENABLED
    10334 
// Stores the configuration for a sequence of VkDeviceMemory blocks of one
// memory type; blocks themselves are created lazily (see CreateMinBlocks /
// Allocate). No allocation happens in the constructor itself.
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10362 
    10363 VmaBlockVector::~VmaBlockVector()
    10364 {
    10365  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10366 
    10367  for(size_t i = m_Blocks.size(); i--; )
    10368  {
    10369  m_Blocks[i]->Destroy(m_hAllocator);
    10370  vma_delete(m_hAllocator, m_Blocks[i]);
    10371  }
    10372 }
    10373 
    10374 VkResult VmaBlockVector::CreateMinBlocks()
    10375 {
    10376  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10377  {
    10378  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10379  if(res != VK_SUCCESS)
    10380  {
    10381  return res;
    10382  }
    10383  }
    10384  return VK_SUCCESS;
    10385 }
    10386 
    10387 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10388 {
    10389  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10390 
    10391  const size_t blockCount = m_Blocks.size();
    10392 
    10393  pStats->size = 0;
    10394  pStats->unusedSize = 0;
    10395  pStats->allocationCount = 0;
    10396  pStats->unusedRangeCount = 0;
    10397  pStats->unusedRangeSizeMax = 0;
    10398  pStats->blockCount = blockCount;
    10399 
    10400  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10401  {
    10402  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10403  VMA_ASSERT(pBlock);
    10404  VMA_HEAVY_ASSERT(pBlock->Validate());
    10405  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10406  }
    10407 }
    10408 
    10409 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10410 {
    10411  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10412  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10413  (VMA_DEBUG_MARGIN > 0) &&
    10414  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10415 }
    10416 
    10417 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10418 
    10419 VkResult VmaBlockVector::Allocate(
    10420  VmaPool hCurrentPool,
    10421  uint32_t currentFrameIndex,
    10422  VkDeviceSize size,
    10423  VkDeviceSize alignment,
    10424  const VmaAllocationCreateInfo& createInfo,
    10425  VmaSuballocationType suballocType,
    10426  VmaAllocation* pAllocation)
    10427 {
    10428  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10429  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10430  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10431  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10432  const bool canCreateNewBlock =
    10433  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10434  (m_Blocks.size() < m_MaxBlockCount);
    10435  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10436 
    10437  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10438  // Which in turn is available only when maxBlockCount = 1.
    10439  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10440  {
    10441  canMakeOtherLost = false;
    10442  }
    10443 
    10444  // Upper address can only be used with linear allocator and within single memory block.
    10445  if(isUpperAddress &&
    10446  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10447  {
    10448  return VK_ERROR_FEATURE_NOT_PRESENT;
    10449  }
    10450 
    10451  // Validate strategy.
    10452  switch(strategy)
    10453  {
    10454  case 0:
    10456  break;
    10460  break;
    10461  default:
    10462  return VK_ERROR_FEATURE_NOT_PRESENT;
    10463  }
    10464 
    10465  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10466  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10467  {
    10468  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10469  }
    10470 
    10471  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10472 
    10473  /*
    10474  Under certain condition, this whole section can be skipped for optimization, so
    10475  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10476  e.g. for custom pools with linear algorithm.
    10477  */
    10478  if(!canMakeOtherLost || canCreateNewBlock)
    10479  {
    10480  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10481  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10483 
    10484  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10485  {
    10486  // Use only last block.
    10487  if(!m_Blocks.empty())
    10488  {
    10489  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10490  VMA_ASSERT(pCurrBlock);
    10491  VkResult res = AllocateFromBlock(
    10492  pCurrBlock,
    10493  hCurrentPool,
    10494  currentFrameIndex,
    10495  size,
    10496  alignment,
    10497  allocFlagsCopy,
    10498  createInfo.pUserData,
    10499  suballocType,
    10500  strategy,
    10501  pAllocation);
    10502  if(res == VK_SUCCESS)
    10503  {
    10504  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10505  return VK_SUCCESS;
    10506  }
    10507  }
    10508  }
    10509  else
    10510  {
    10512  {
    10513  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10514  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10515  {
    10516  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10517  VMA_ASSERT(pCurrBlock);
    10518  VkResult res = AllocateFromBlock(
    10519  pCurrBlock,
    10520  hCurrentPool,
    10521  currentFrameIndex,
    10522  size,
    10523  alignment,
    10524  allocFlagsCopy,
    10525  createInfo.pUserData,
    10526  suballocType,
    10527  strategy,
    10528  pAllocation);
    10529  if(res == VK_SUCCESS)
    10530  {
    10531  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10532  return VK_SUCCESS;
    10533  }
    10534  }
    10535  }
    10536  else // WORST_FIT, FIRST_FIT
    10537  {
    10538  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10539  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10540  {
    10541  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10542  VMA_ASSERT(pCurrBlock);
    10543  VkResult res = AllocateFromBlock(
    10544  pCurrBlock,
    10545  hCurrentPool,
    10546  currentFrameIndex,
    10547  size,
    10548  alignment,
    10549  allocFlagsCopy,
    10550  createInfo.pUserData,
    10551  suballocType,
    10552  strategy,
    10553  pAllocation);
    10554  if(res == VK_SUCCESS)
    10555  {
    10556  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10557  return VK_SUCCESS;
    10558  }
    10559  }
    10560  }
    10561  }
    10562 
    10563  // 2. Try to create new block.
    10564  if(canCreateNewBlock)
    10565  {
    10566  // Calculate optimal size for new block.
    10567  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10568  uint32_t newBlockSizeShift = 0;
    10569  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10570 
    10571  if(!m_ExplicitBlockSize)
    10572  {
    10573  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10574  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10575  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10576  {
    10577  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10578  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10579  {
    10580  newBlockSize = smallerNewBlockSize;
    10581  ++newBlockSizeShift;
    10582  }
    10583  else
    10584  {
    10585  break;
    10586  }
    10587  }
    10588  }
    10589 
    10590  size_t newBlockIndex = 0;
    10591  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10592  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10593  if(!m_ExplicitBlockSize)
    10594  {
    10595  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10596  {
    10597  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10598  if(smallerNewBlockSize >= size)
    10599  {
    10600  newBlockSize = smallerNewBlockSize;
    10601  ++newBlockSizeShift;
    10602  res = CreateBlock(newBlockSize, &newBlockIndex);
    10603  }
    10604  else
    10605  {
    10606  break;
    10607  }
    10608  }
    10609  }
    10610 
    10611  if(res == VK_SUCCESS)
    10612  {
    10613  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10614  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10615 
    10616  res = AllocateFromBlock(
    10617  pBlock,
    10618  hCurrentPool,
    10619  currentFrameIndex,
    10620  size,
    10621  alignment,
    10622  allocFlagsCopy,
    10623  createInfo.pUserData,
    10624  suballocType,
    10625  strategy,
    10626  pAllocation);
    10627  if(res == VK_SUCCESS)
    10628  {
    10629  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10630  return VK_SUCCESS;
    10631  }
    10632  else
    10633  {
    10634  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10635  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10636  }
    10637  }
    10638  }
    10639  }
    10640 
    10641  // 3. Try to allocate from existing blocks with making other allocations lost.
    10642  if(canMakeOtherLost)
    10643  {
    10644  uint32_t tryIndex = 0;
    10645  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10646  {
    10647  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10648  VmaAllocationRequest bestRequest = {};
    10649  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10650 
    10651  // 1. Search existing allocations.
    10653  {
    10654  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10655  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10656  {
    10657  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10658  VMA_ASSERT(pCurrBlock);
    10659  VmaAllocationRequest currRequest = {};
    10660  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10661  currentFrameIndex,
    10662  m_FrameInUseCount,
    10663  m_BufferImageGranularity,
    10664  size,
    10665  alignment,
    10666  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10667  suballocType,
    10668  canMakeOtherLost,
    10669  strategy,
    10670  &currRequest))
    10671  {
    10672  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10673  if(pBestRequestBlock == VMA_NULL ||
    10674  currRequestCost < bestRequestCost)
    10675  {
    10676  pBestRequestBlock = pCurrBlock;
    10677  bestRequest = currRequest;
    10678  bestRequestCost = currRequestCost;
    10679 
    10680  if(bestRequestCost == 0)
    10681  {
    10682  break;
    10683  }
    10684  }
    10685  }
    10686  }
    10687  }
    10688  else // WORST_FIT, FIRST_FIT
    10689  {
    10690  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10691  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10692  {
    10693  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10694  VMA_ASSERT(pCurrBlock);
    10695  VmaAllocationRequest currRequest = {};
    10696  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10697  currentFrameIndex,
    10698  m_FrameInUseCount,
    10699  m_BufferImageGranularity,
    10700  size,
    10701  alignment,
    10702  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10703  suballocType,
    10704  canMakeOtherLost,
    10705  strategy,
    10706  &currRequest))
    10707  {
    10708  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10709  if(pBestRequestBlock == VMA_NULL ||
    10710  currRequestCost < bestRequestCost ||
    10712  {
    10713  pBestRequestBlock = pCurrBlock;
    10714  bestRequest = currRequest;
    10715  bestRequestCost = currRequestCost;
    10716 
    10717  if(bestRequestCost == 0 ||
    10719  {
    10720  break;
    10721  }
    10722  }
    10723  }
    10724  }
    10725  }
    10726 
    10727  if(pBestRequestBlock != VMA_NULL)
    10728  {
    10729  if(mapped)
    10730  {
    10731  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10732  if(res != VK_SUCCESS)
    10733  {
    10734  return res;
    10735  }
    10736  }
    10737 
    10738  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10739  currentFrameIndex,
    10740  m_FrameInUseCount,
    10741  &bestRequest))
    10742  {
    10743  // We no longer have an empty Allocation.
    10744  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10745  {
    10746  m_HasEmptyBlock = false;
    10747  }
    10748  // Allocate from this pBlock.
    10749  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10750  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10751  (*pAllocation)->InitBlockAllocation(
    10752  hCurrentPool,
    10753  pBestRequestBlock,
    10754  bestRequest.offset,
    10755  alignment,
    10756  size,
    10757  suballocType,
    10758  mapped,
    10759  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10760  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10761  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10762  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10763  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10764  {
    10765  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10766  }
    10767  if(IsCorruptionDetectionEnabled())
    10768  {
    10769  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10770  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10771  }
    10772  return VK_SUCCESS;
    10773  }
    10774  // else: Some allocations must have been touched while we are here. Next try.
    10775  }
    10776  else
    10777  {
    10778  // Could not find place in any of the blocks - break outer loop.
    10779  break;
    10780  }
    10781  }
    10782  /* Maximum number of tries exceeded - a very unlike event when many other
    10783  threads are simultaneously touching allocations making it impossible to make
    10784  lost at the same time as we try to allocate. */
    10785  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10786  {
    10787  return VK_ERROR_TOO_MANY_OBJECTS;
    10788  }
    10789  }
    10790 
    10791  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10792 }
    10793 
    10794 void VmaBlockVector::Free(
    10795  VmaAllocation hAllocation)
    10796 {
    10797  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10798 
    10799  // Scope for lock.
    10800  {
    10801  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10802 
    10803  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10804 
    10805  if(IsCorruptionDetectionEnabled())
    10806  {
    10807  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10808  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10809  }
    10810 
    10811  if(hAllocation->IsPersistentMap())
    10812  {
    10813  pBlock->Unmap(m_hAllocator, 1);
    10814  }
    10815 
    10816  pBlock->m_pMetadata->Free(hAllocation);
    10817  VMA_HEAVY_ASSERT(pBlock->Validate());
    10818 
    10819  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10820 
    10821  // pBlock became empty after this deallocation.
    10822  if(pBlock->m_pMetadata->IsEmpty())
    10823  {
    10824  // Already has empty Allocation. We don't want to have two, so delete this one.
    10825  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10826  {
    10827  pBlockToDelete = pBlock;
    10828  Remove(pBlock);
    10829  }
    10830  // We now have first empty block.
    10831  else
    10832  {
    10833  m_HasEmptyBlock = true;
    10834  }
    10835  }
    10836  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10837  // (This is optional, heuristics.)
    10838  else if(m_HasEmptyBlock)
    10839  {
    10840  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10841  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10842  {
    10843  pBlockToDelete = pLastBlock;
    10844  m_Blocks.pop_back();
    10845  m_HasEmptyBlock = false;
    10846  }
    10847  }
    10848 
    10849  IncrementallySortBlocks();
    10850  }
    10851 
    10852  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10853  // lock, for performance reason.
    10854  if(pBlockToDelete != VMA_NULL)
    10855  {
    10856  VMA_DEBUG_LOG(" Deleted empty allocation");
    10857  pBlockToDelete->Destroy(m_hAllocator);
    10858  vma_delete(m_hAllocator, pBlockToDelete);
    10859  }
    10860 }
    10861 
    10862 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10863 {
    10864  VkDeviceSize result = 0;
    10865  for(size_t i = m_Blocks.size(); i--; )
    10866  {
    10867  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10868  if(result >= m_PreferredBlockSize)
    10869  {
    10870  break;
    10871  }
    10872  }
    10873  return result;
    10874 }
    10875 
    10876 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10877 {
    10878  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10879  {
    10880  if(m_Blocks[blockIndex] == pBlock)
    10881  {
    10882  VmaVectorRemove(m_Blocks, blockIndex);
    10883  return;
    10884  }
    10885  }
    10886  VMA_ASSERT(0);
    10887 }
    10888 
    10889 void VmaBlockVector::IncrementallySortBlocks()
    10890 {
    10891  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10892  {
    10893  // Bubble sort only until first swap.
    10894  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10895  {
    10896  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10897  {
    10898  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10899  return;
    10900  }
    10901  }
    10902  }
    10903 }
    10904 
    10905 VkResult VmaBlockVector::AllocateFromBlock(
    10906  VmaDeviceMemoryBlock* pBlock,
    10907  VmaPool hCurrentPool,
    10908  uint32_t currentFrameIndex,
    10909  VkDeviceSize size,
    10910  VkDeviceSize alignment,
    10911  VmaAllocationCreateFlags allocFlags,
    10912  void* pUserData,
    10913  VmaSuballocationType suballocType,
    10914  uint32_t strategy,
    10915  VmaAllocation* pAllocation)
    10916 {
    10917  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    10918  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10919  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10920  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10921 
    10922  VmaAllocationRequest currRequest = {};
    10923  if(pBlock->m_pMetadata->CreateAllocationRequest(
    10924  currentFrameIndex,
    10925  m_FrameInUseCount,
    10926  m_BufferImageGranularity,
    10927  size,
    10928  alignment,
    10929  isUpperAddress,
    10930  suballocType,
    10931  false, // canMakeOtherLost
    10932  strategy,
    10933  &currRequest))
    10934  {
    10935  // Allocate from pCurrBlock.
    10936  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    10937 
    10938  if(mapped)
    10939  {
    10940  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    10941  if(res != VK_SUCCESS)
    10942  {
    10943  return res;
    10944  }
    10945  }
    10946 
    10947  // We no longer have an empty Allocation.
    10948  if(pBlock->m_pMetadata->IsEmpty())
    10949  {
    10950  m_HasEmptyBlock = false;
    10951  }
    10952 
    10953  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10954  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
    10955  (*pAllocation)->InitBlockAllocation(
    10956  hCurrentPool,
    10957  pBlock,
    10958  currRequest.offset,
    10959  alignment,
    10960  size,
    10961  suballocType,
    10962  mapped,
    10963  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10964  VMA_HEAVY_ASSERT(pBlock->Validate());
    10965  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    10966  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10967  {
    10968  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10969  }
    10970  if(IsCorruptionDetectionEnabled())
    10971  {
    10972  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    10973  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10974  }
    10975  return VK_SUCCESS;
    10976  }
    10977  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10978 }
    10979 
    10980 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10981 {
    10982  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10983  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10984  allocInfo.allocationSize = blockSize;
    10985  VkDeviceMemory mem = VK_NULL_HANDLE;
    10986  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10987  if(res < 0)
    10988  {
    10989  return res;
    10990  }
    10991 
    10992  // New VkDeviceMemory successfully created.
    10993 
    10994  // Create new Allocation for it.
    10995  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10996  pBlock->Init(
    10997  m_hAllocator,
    10998  m_MemoryTypeIndex,
    10999  mem,
    11000  allocInfo.allocationSize,
    11001  m_NextBlockId++,
    11002  m_Algorithm);
    11003 
    11004  m_Blocks.push_back(pBlock);
    11005  if(pNewBlockIndex != VMA_NULL)
    11006  {
    11007  *pNewBlockIndex = m_Blocks.size() - 1;
    11008  }
    11009 
    11010  return VK_SUCCESS;
    11011 }
    11012 
    11013 #if VMA_STATS_STRING_ENABLED
    11014 
    11015 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    11016 {
    11017  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11018 
    11019  json.BeginObject();
    11020 
    11021  if(m_IsCustomPool)
    11022  {
    11023  json.WriteString("MemoryTypeIndex");
    11024  json.WriteNumber(m_MemoryTypeIndex);
    11025 
    11026  json.WriteString("BlockSize");
    11027  json.WriteNumber(m_PreferredBlockSize);
    11028 
    11029  json.WriteString("BlockCount");
    11030  json.BeginObject(true);
    11031  if(m_MinBlockCount > 0)
    11032  {
    11033  json.WriteString("Min");
    11034  json.WriteNumber((uint64_t)m_MinBlockCount);
    11035  }
    11036  if(m_MaxBlockCount < SIZE_MAX)
    11037  {
    11038  json.WriteString("Max");
    11039  json.WriteNumber((uint64_t)m_MaxBlockCount);
    11040  }
    11041  json.WriteString("Cur");
    11042  json.WriteNumber((uint64_t)m_Blocks.size());
    11043  json.EndObject();
    11044 
    11045  if(m_FrameInUseCount > 0)
    11046  {
    11047  json.WriteString("FrameInUseCount");
    11048  json.WriteNumber(m_FrameInUseCount);
    11049  }
    11050 
    11051  if(m_Algorithm != 0)
    11052  {
    11053  json.WriteString("Algorithm");
    11054  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    11055  }
    11056  }
    11057  else
    11058  {
    11059  json.WriteString("PreferredBlockSize");
    11060  json.WriteNumber(m_PreferredBlockSize);
    11061  }
    11062 
    11063  json.WriteString("Blocks");
    11064  json.BeginObject();
    11065  for(size_t i = 0; i < m_Blocks.size(); ++i)
    11066  {
    11067  json.BeginString();
    11068  json.ContinueString(m_Blocks[i]->GetId());
    11069  json.EndString();
    11070 
    11071  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    11072  }
    11073  json.EndObject();
    11074 
    11075  json.EndObject();
    11076 }
    11077 
    11078 #endif // #if VMA_STATS_STRING_ENABLED
    11079 
    11080 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11081  VmaAllocator hAllocator,
    11082  uint32_t currentFrameIndex)
    11083 {
    11084  if(m_pDefragmentator == VMA_NULL)
    11085  {
    11086  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11087  hAllocator,
    11088  this,
    11089  currentFrameIndex);
    11090  }
    11091 
    11092  return m_pDefragmentator;
    11093 }
    11094 
/*
Runs defragmentation on this block vector using its previously created
defragmentator (see EnsureDefragmentator). No-op returning VK_SUCCESS when
no defragmentator exists.

maxBytesToMove / maxAllocationsToMove are in-out budgets: they are reduced
by the amount of work actually performed so the caller can spread a global
budget across multiple block vectors. Statistics, if requested, are
accumulated into *pDefragmentationStats. Blocks that became empty are freed
(down to m_MinBlockCount); if an empty block must be kept, m_HasEmptyBlock
is set so future allocations know about it.
*/
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must have respected the budgets it was given.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Shrink the caller's remaining budget (parameters are references).
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks. Iterate backwards so VmaVectorRemove doesn't shift
    // not-yet-visited elements under the loop index.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block to honor m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11151 
    11152 void VmaBlockVector::DestroyDefragmentator()
    11153 {
    11154  if(m_pDefragmentator != VMA_NULL)
    11155  {
    11156  vma_delete(m_hAllocator, m_pDefragmentator);
    11157  m_pDefragmentator = VMA_NULL;
    11158  }
    11159 }
    11160 
    11161 void VmaBlockVector::MakePoolAllocationsLost(
    11162  uint32_t currentFrameIndex,
    11163  size_t* pLostAllocationCount)
    11164 {
    11165  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11166  size_t lostAllocationCount = 0;
    11167  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11168  {
    11169  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11170  VMA_ASSERT(pBlock);
    11171  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11172  }
    11173  if(pLostAllocationCount != VMA_NULL)
    11174  {
    11175  *pLostAllocationCount = lostAllocationCount;
    11176  }
    11177 }
    11178 
    11179 VkResult VmaBlockVector::CheckCorruption()
    11180 {
    11181  if(!IsCorruptionDetectionEnabled())
    11182  {
    11183  return VK_ERROR_FEATURE_NOT_PRESENT;
    11184  }
    11185 
    11186  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11187  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11188  {
    11189  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11190  VMA_ASSERT(pBlock);
    11191  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11192  if(res != VK_SUCCESS)
    11193  {
    11194  return res;
    11195  }
    11196  }
    11197  return VK_SUCCESS;
    11198 }
    11199 
    11200 void VmaBlockVector::AddStats(VmaStats* pStats)
    11201 {
    11202  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11203  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11204 
    11205  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11206 
    11207  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11208  {
    11209  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11210  VMA_ASSERT(pBlock);
    11211  VMA_HEAVY_ASSERT(pBlock->Validate());
    11212  VmaStatInfo allocationStatInfo;
    11213  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11214  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11215  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11216  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11217  }
    11218 }
    11219 
    11221 // VmaDefragmentator members definition
    11222 
// Constructs a defragmentator bound to a single block vector. Both internal
// containers use the allocator's custom allocation callbacks.
// Defragmentation is only supported for the default allocation algorithm
// (GetAlgorithm() == 0), i.e. not for linear pools.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11237 
    11238 VmaDefragmentator::~VmaDefragmentator()
    11239 {
    11240  for(size_t i = m_Blocks.size(); i--; )
    11241  {
    11242  vma_delete(m_hAllocator, m_Blocks[i]);
    11243  }
    11244 }
    11245 
    11246 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11247 {
    11248  AllocationInfo allocInfo;
    11249  allocInfo.m_hAllocation = hAlloc;
    11250  allocInfo.m_pChanged = pChanged;
    11251  m_Allocations.push_back(allocInfo);
    11252 }
    11253 
    11254 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11255 {
    11256  // It has already been mapped for defragmentation.
    11257  if(m_pMappedDataForDefragmentation)
    11258  {
    11259  *ppMappedData = m_pMappedDataForDefragmentation;
    11260  return VK_SUCCESS;
    11261  }
    11262 
    11263  // It is originally mapped.
    11264  if(m_pBlock->GetMappedData())
    11265  {
    11266  *ppMappedData = m_pBlock->GetMappedData();
    11267  return VK_SUCCESS;
    11268  }
    11269 
    11270  // Map on first usage.
    11271  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11272  *ppMappedData = m_pMappedDataForDefragmentation;
    11273  return res;
    11274 }
    11275 
    11276 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11277 {
    11278  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11279  {
    11280  m_pBlock->Unmap(hAllocator, 1);
    11281  }
    11282 }
    11283 
/*
Performs one round of defragmentation: walks registered allocations from the
most "source-like" block (last) to the most "destination-like" (first) and,
for each, tries to move it into the earliest block with room, copying the
data through mapped pointers.

Returns VK_SUCCESS when there is nothing (more) to move, VK_INCOMPLETE when
the maxBytesToMove / maxAllocationsToMove budget was reached, or an error
from mapping block memory.
*/
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // Cursor over (block, allocation); SIZE_MAX means "start at the last
    // allocation of the current block".
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            // NOTE(review): this CreateAllocationRequest call appears to be
            // missing its `strategy` argument compared to the calls in
            // VmaBlockVector::AllocateFromBlock - confirm against the
            // declaration of CreateAllocationRequest.
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                if(VMA_DEBUG_MARGIN > 0)
                {
                    // Re-establish corruption-detection margins around the new location.
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Commit the move in both blocks' metadata and repoint the allocation.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11427 
// Performs defragmentation over all allocations previously registered in
// m_Allocations, limited by maxBytesToMove / maxAllocationsToMove.
// Must be called while holding VmaBlockVector::m_Mutex (see comment below).
// Returns the result of the last DefragmentRound: VK_SUCCESS when finished,
// VK_INCOMPLETE when a move limit was reached, or an error from mapping memory.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value, so allocations can be matched to
    // their block with a binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the blocks collected above.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11495 
    11496 bool VmaDefragmentator::MoveMakesSense(
    11497  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11498  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11499 {
    11500  if(dstBlockIndex < srcBlockIndex)
    11501  {
    11502  return true;
    11503  }
    11504  if(dstBlockIndex > srcBlockIndex)
    11505  {
    11506  return false;
    11507  }
    11508  if(dstOffset < srcOffset)
    11509  {
    11510  return true;
    11511  }
    11512  return false;
    11513 }
    11514 
    11516 // VmaRecorder
    11517 
    11518 #if VMA_RECORDING_ENABLED
    11519 
// Members are set to "not initialized yet" values; the real setup
// (opening the file, querying the timer) happens in Init().
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11528 
// Opens the recording file and writes the CSV header.
// Windows-specific: uses QueryPerformanceFrequency/Counter for timestamps
// and fopen_s for file access.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,4"); // File format version: major,minor.

    return VK_SUCCESS;
}
    11550 
    11551 VmaRecorder::~VmaRecorder()
    11552 {
    11553  if(m_File != VMA_NULL)
    11554  {
    11555  fclose(m_File);
    11556  }
    11557 }
    11558 
// Records a vmaCreateAllocator call as a CSV line: threadId,time,frameIndex,functionName.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11568 
// Records a vmaDestroyAllocator call.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11578 
// Records a vmaCreatePool call with the pool parameters and the returned pool handle.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11595 
// Records a vmaDestroyPool call with the pool handle.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11606 
// Records a vmaAllocateMemory call: memory requirements, allocation-create
// parameters, the returned allocation handle, and user data (as string or pointer).
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11631 
// Records a vmaAllocateMemoryForBuffer call. Same fields as RecordAllocateMemory,
// plus the dedicated-allocation required/preferred flags as 0/1.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11660 
// Records a vmaAllocateMemoryForImage call. Same layout as
// RecordAllocateMemoryForBuffer, only the function name differs.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11689 
// Records a vmaFreeMemory call with the allocation handle.
void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11701 
// Records a vmaResizeAllocation call with the allocation handle and requested new size.
void VmaRecorder::RecordResizeAllocation(
    uint32_t frameIndex,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, newSize);
    Flush();
}
    11715 
// Records a vmaSetAllocationUserData call. pUserData is written as a string
// when the allocation was created with USER_DATA_COPY_STRING, otherwise as a pointer.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11732 
// Records a vmaCreateLostAllocation call with the allocation handle.
void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11744 
// Records a vmaMapMemory call with the allocation handle.
void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11756 
// Records a vmaUnmapMemory call with the allocation handle.
void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11768 
// Records a vmaFlushAllocation call with the allocation handle and flushed range.
void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11782 
// Records a vmaInvalidateAllocation call with the allocation handle and invalidated range.
void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11796 
// Records a vmaCreateBuffer call: buffer-create parameters, allocation-create
// parameters, the returned allocation handle, and user data.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11822 
// Records a vmaCreateImage call: full image-create parameters, allocation-create
// parameters, the returned allocation handle, and user data.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11857 
// Records a vmaDestroyBuffer call with the allocation handle.
void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11869 
// Records a vmaDestroyImage call with the allocation handle.
void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11881 
// Records a vmaTouchAllocation call with the allocation handle.
void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11893 
// Records a vmaGetAllocationInfo call with the allocation handle.
void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11905 
// Records a vmaMakePoolAllocationsLost call with the pool handle.
void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11917 
// Converts allocation pUserData to a printable string for the recording file:
// treated as a null-terminated string when USER_DATA_COPY_STRING flag is set,
// otherwise formatted as a pointer value (sprintf_s is MSVC-specific);
// null user data yields an empty string.
VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
{
    if(pUserData != VMA_NULL)
    {
        if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
        {
            // With the copy-string flag, pUserData points to a C string.
            m_Str = (const char*)pUserData;
        }
        else
        {
            sprintf_s(m_PtrStr, "%p", pUserData);
            m_Str = m_PtrStr;
        }
    }
    else
    {
        m_Str = "";
    }
}
    11937 
// Writes the "Config,Begin" ... "Config,End" section of the recording:
// physical-device identity and limits, the full memory heap/type layout,
// enabled extensions, and the compile-time VMA_* macro values — everything
// needed to replay the recording on a comparable configuration.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11983 
// Fills the calling thread id and time in seconds since Init() for a recorded
// call entry. Windows-specific: GetCurrentThreadId + QueryPerformanceCounter.
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}
    11992 
// Flushes the file only when VMA_RECORD_FLUSH_AFTER_CALL_BIT was requested,
// so a crash loses as little of the recording as possible at the cost of speed.
void VmaRecorder::Flush()
{
    if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    {
        fflush(m_File);
    }
}
    12000 
    12001 #endif // #if VMA_RECORDING_ENABLED
    12002 
    12004 // VmaAllocator_T
    12005 
    12006 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12007  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12008  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12009  m_hDevice(pCreateInfo->device),
    12010  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12011  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12012  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12013  m_PreferredLargeHeapBlockSize(0),
    12014  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12015  m_CurrentFrameIndex(0),
    12016  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12017  m_NextPoolId(0)
    12019  ,m_pRecorder(VMA_NULL)
    12020 #endif
    12021 {
    12022  if(VMA_DEBUG_DETECT_CORRUPTION)
    12023  {
    12024  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12025  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12026  }
    12027 
    12028  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12029 
    12030 #if !(VMA_DEDICATED_ALLOCATION)
    12032  {
    12033  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12034  }
    12035 #endif
    12036 
    12037  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12038  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12039  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12040 
    12041  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12042  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12043 
    12044  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12045  {
    12046  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12047  }
    12048 
    12049  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12050  {
    12051  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12052  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12053  }
    12054 
    12055  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12056 
    12057  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12058  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12059 
    12060  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12061  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12062  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12063  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12064 
    12065  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12066  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12067 
    12068  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12069  {
    12070  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12071  {
    12072  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12073  if(limit != VK_WHOLE_SIZE)
    12074  {
    12075  m_HeapSizeLimit[heapIndex] = limit;
    12076  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12077  {
    12078  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12079  }
    12080  }
    12081  }
    12082  }
    12083 
    12084  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12085  {
    12086  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12087 
    12088  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12089  this,
    12090  memTypeIndex,
    12091  preferredBlockSize,
    12092  0,
    12093  SIZE_MAX,
    12094  GetBufferImageGranularity(),
    12095  pCreateInfo->frameInUseCount,
    12096  false, // isCustomPool
    12097  false, // explicitBlockSize
    12098  false); // linearAlgorithm
    12099  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12100  // becase minBlockCount is 0.
    12101  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12102 
    12103  }
    12104 }
    12105 
// Second-phase initialization, called after the constructor. Sets up the call
// recorder when VmaAllocatorCreateInfo::pRecordSettings provides a file path.
// Returns VK_ERROR_FEATURE_NOT_PRESENT when recording is requested but
// compiled out (VMA_RECORDING_ENABLED != 1), or the recorder's Init() error.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
    12133 
// Destroys the call recorder (if any) and the per-memory-type block vectors
// and dedicated-allocation lists. All custom pools must already have been
// destroyed by the user at this point.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    // Destroy in reverse memory-type order.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
    12152 
// Builds m_VulkanFunctions, the function-pointer table through which this
// allocator makes every Vulkan call.
// Sources, in order (later steps override earlier ones):
//  1. With VMA_STATIC_VULKAN_FUNCTIONS == 1: addresses of the statically
//     linked core entry points; the KHR dedicated-allocation functions are
//     not exported statically, so they are fetched via vkGetDeviceProcAddr
//     when that extension is in use.
//  2. Any non-null entries of the optional caller-supplied pVulkanFunctions.
// Finally asserts that every required pointer has been filled in.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension entry points must be queried from the device at runtime.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one member from pVulkanFunctions if the caller provided it.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12238 
    12239 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12240 {
    12241  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12242  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12243  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12244  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12245 }
    12246 
    12247 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12248  VkDeviceSize size,
    12249  VkDeviceSize alignment,
    12250  bool dedicatedAllocation,
    12251  VkBuffer dedicatedBuffer,
    12252  VkImage dedicatedImage,
    12253  const VmaAllocationCreateInfo& createInfo,
    12254  uint32_t memTypeIndex,
    12255  VmaSuballocationType suballocType,
    12256  VmaAllocation* pAllocation)
    12257 {
    12258  VMA_ASSERT(pAllocation != VMA_NULL);
    12259  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12260 
    12261  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12262 
    12263  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12264  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12265  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12266  {
    12267  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12268  }
    12269 
    12270  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12271  VMA_ASSERT(blockVector);
    12272 
    12273  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12274  bool preferDedicatedMemory =
    12275  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12276  dedicatedAllocation ||
    12277  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12278  size > preferredBlockSize / 2;
    12279 
    12280  if(preferDedicatedMemory &&
    12281  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12282  finalCreateInfo.pool == VK_NULL_HANDLE)
    12283  {
    12285  }
    12286 
    12287  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12288  {
    12289  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12290  {
    12291  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12292  }
    12293  else
    12294  {
    12295  return AllocateDedicatedMemory(
    12296  size,
    12297  suballocType,
    12298  memTypeIndex,
    12299  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12300  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12301  finalCreateInfo.pUserData,
    12302  dedicatedBuffer,
    12303  dedicatedImage,
    12304  pAllocation);
    12305  }
    12306  }
    12307  else
    12308  {
    12309  VkResult res = blockVector->Allocate(
    12310  VK_NULL_HANDLE, // hCurrentPool
    12311  m_CurrentFrameIndex.load(),
    12312  size,
    12313  alignment,
    12314  finalCreateInfo,
    12315  suballocType,
    12316  pAllocation);
    12317  if(res == VK_SUCCESS)
    12318  {
    12319  return res;
    12320  }
    12321 
    12322  // 5. Try dedicated memory.
    12323  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12324  {
    12325  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12326  }
    12327  else
    12328  {
    12329  res = AllocateDedicatedMemory(
    12330  size,
    12331  suballocType,
    12332  memTypeIndex,
    12333  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12334  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12335  finalCreateInfo.pUserData,
    12336  dedicatedBuffer,
    12337  dedicatedImage,
    12338  pAllocation);
    12339  if(res == VK_SUCCESS)
    12340  {
    12341  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12342  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12343  return VK_SUCCESS;
    12344  }
    12345  else
    12346  {
    12347  // Everything failed: Return error code.
    12348  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12349  return res;
    12350  }
    12351  }
    12352  }
    12353 }
    12354 
// Allocates one VkDeviceMemory dedicated to a single allocation (no
// suballocation). Optionally maps it persistently, initializes the
// VmaAllocation_T object, and registers it in
// m_pDedicatedAllocations[memTypeIndex] so FreeDedicatedMemory can find it.
// On mapping failure the freshly allocated memory is released - nothing leaks.
// dedicatedBuffer/dedicatedImage are mutually exclusive and feed the
// VK_KHR_dedicated_allocation pNext chain when that extension is enabled.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,                 // map persistently right after allocation
    bool isUserDataString,    // pUserData points to a string to be copied
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain VkMemoryDedicatedAllocateInfoKHR if the caller provided the
    // resource this memory is dedicated to.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Map the whole memory range now if persistent mapping was requested.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Roll back the allocation so the failure leaks nothing.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12437 
// Queries memory requirements for hBuffer. When VK_KHR_dedicated_allocation
// is in use, calls vkGetBufferMemoryRequirements2KHR to additionally learn
// whether the buffer requires or prefers a dedicated allocation; otherwise
// falls back to the core function and reports false for both flags.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,                 // out
    bool& requiresDedicatedAllocation,            // out
    bool& prefersDedicatedAllocation) const       // out
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements struct to receive the extra info.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12469 
// Image counterpart of GetBufferMemoryRequirements: queries requirements for
// hImage, using vkGetImageMemoryRequirements2KHR when
// VK_KHR_dedicated_allocation is in use to also report whether a dedicated
// allocation is required or preferred; otherwise both flags are false.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,                 // out
    bool& requiresDedicatedAllocation,            // out
    bool& prefersDedicatedAllocation) const       // out
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        // Chain the dedicated-requirements struct to receive the extra info.
        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
    12501 
    12502 VkResult VmaAllocator_T::AllocateMemory(
    12503  const VkMemoryRequirements& vkMemReq,
    12504  bool requiresDedicatedAllocation,
    12505  bool prefersDedicatedAllocation,
    12506  VkBuffer dedicatedBuffer,
    12507  VkImage dedicatedImage,
    12508  const VmaAllocationCreateInfo& createInfo,
    12509  VmaSuballocationType suballocType,
    12510  VmaAllocation* pAllocation)
    12511 {
    12512  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12513 
    12514  if(vkMemReq.size == 0)
    12515  {
    12516  return VK_ERROR_VALIDATION_FAILED_EXT;
    12517  }
    12518  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12519  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12520  {
    12521  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12522  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12523  }
    12524  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12526  {
    12527  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12528  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12529  }
    12530  if(requiresDedicatedAllocation)
    12531  {
    12532  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12533  {
    12534  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12535  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12536  }
    12537  if(createInfo.pool != VK_NULL_HANDLE)
    12538  {
    12539  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12540  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12541  }
    12542  }
    12543  if((createInfo.pool != VK_NULL_HANDLE) &&
    12544  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12545  {
    12546  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12547  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12548  }
    12549 
    12550  if(createInfo.pool != VK_NULL_HANDLE)
    12551  {
    12552  const VkDeviceSize alignmentForPool = VMA_MAX(
    12553  vkMemReq.alignment,
    12554  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12555  return createInfo.pool->m_BlockVector.Allocate(
    12556  createInfo.pool,
    12557  m_CurrentFrameIndex.load(),
    12558  vkMemReq.size,
    12559  alignmentForPool,
    12560  createInfo,
    12561  suballocType,
    12562  pAllocation);
    12563  }
    12564  else
    12565  {
    12566  // Bit mask of memory Vulkan types acceptable for this allocation.
    12567  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12568  uint32_t memTypeIndex = UINT32_MAX;
    12569  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12570  if(res == VK_SUCCESS)
    12571  {
    12572  VkDeviceSize alignmentForMemType = VMA_MAX(
    12573  vkMemReq.alignment,
    12574  GetMemoryTypeMinAlignment(memTypeIndex));
    12575 
    12576  res = AllocateMemoryOfType(
    12577  vkMemReq.size,
    12578  alignmentForMemType,
    12579  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12580  dedicatedBuffer,
    12581  dedicatedImage,
    12582  createInfo,
    12583  memTypeIndex,
    12584  suballocType,
    12585  pAllocation);
    12586  // Succeeded on first try.
    12587  if(res == VK_SUCCESS)
    12588  {
    12589  return res;
    12590  }
    12591  // Allocation from this memory type failed. Try other compatible memory types.
    12592  else
    12593  {
    12594  for(;;)
    12595  {
    12596  // Remove old memTypeIndex from list of possibilities.
    12597  memoryTypeBits &= ~(1u << memTypeIndex);
    12598  // Find alternative memTypeIndex.
    12599  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12600  if(res == VK_SUCCESS)
    12601  {
    12602  alignmentForMemType = VMA_MAX(
    12603  vkMemReq.alignment,
    12604  GetMemoryTypeMinAlignment(memTypeIndex));
    12605 
    12606  res = AllocateMemoryOfType(
    12607  vkMemReq.size,
    12608  alignmentForMemType,
    12609  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12610  dedicatedBuffer,
    12611  dedicatedImage,
    12612  createInfo,
    12613  memTypeIndex,
    12614  suballocType,
    12615  pAllocation);
    12616  // Allocation from this alternative memory type succeeded.
    12617  if(res == VK_SUCCESS)
    12618  {
    12619  return res;
    12620  }
    12621  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12622  }
    12623  // No other matching memory type index could be found.
    12624  else
    12625  {
    12626  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12627  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12628  }
    12629  }
    12630  }
    12631  }
    12632  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12633  else
    12634  return res;
    12635  }
    12636 }
    12637 
    12638 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12639 {
    12640  VMA_ASSERT(allocation);
    12641 
    12642  if(TouchAllocation(allocation))
    12643  {
    12644  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12645  {
    12646  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12647  }
    12648 
    12649  switch(allocation->GetType())
    12650  {
    12651  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12652  {
    12653  VmaBlockVector* pBlockVector = VMA_NULL;
    12654  VmaPool hPool = allocation->GetPool();
    12655  if(hPool != VK_NULL_HANDLE)
    12656  {
    12657  pBlockVector = &hPool->m_BlockVector;
    12658  }
    12659  else
    12660  {
    12661  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12662  pBlockVector = m_pBlockVectors[memTypeIndex];
    12663  }
    12664  pBlockVector->Free(allocation);
    12665  }
    12666  break;
    12667  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12668  FreeDedicatedMemory(allocation);
    12669  break;
    12670  default:
    12671  VMA_ASSERT(0);
    12672  }
    12673  }
    12674 
    12675  allocation->SetUserData(this, VMA_NULL);
    12676  vma_delete(this, allocation);
    12677 }
    12678 
    12679 VkResult VmaAllocator_T::ResizeAllocation(
    12680  const VmaAllocation alloc,
    12681  VkDeviceSize newSize)
    12682 {
    12683  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12684  {
    12685  return VK_ERROR_VALIDATION_FAILED_EXT;
    12686  }
    12687  if(newSize == alloc->GetSize())
    12688  {
    12689  return VK_SUCCESS;
    12690  }
    12691 
    12692  switch(alloc->GetType())
    12693  {
    12694  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12695  return VK_ERROR_FEATURE_NOT_PRESENT;
    12696  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12697  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12698  {
    12699  alloc->ChangeSize(newSize);
    12700  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12701  return VK_SUCCESS;
    12702  }
    12703  else
    12704  {
    12705  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12706  }
    12707  default:
    12708  VMA_ASSERT(0);
    12709  return VK_ERROR_VALIDATION_FAILED_EXT;
    12710  }
    12711 }
    12712 
    12713 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12714 {
    12715  // Initialize.
    12716  InitStatInfo(pStats->total);
    12717  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12718  InitStatInfo(pStats->memoryType[i]);
    12719  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12720  InitStatInfo(pStats->memoryHeap[i]);
    12721 
    12722  // Process default pools.
    12723  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12724  {
    12725  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12726  VMA_ASSERT(pBlockVector);
    12727  pBlockVector->AddStats(pStats);
    12728  }
    12729 
    12730  // Process custom pools.
    12731  {
    12732  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12733  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12734  {
    12735  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12736  }
    12737  }
    12738 
    12739  // Process dedicated allocations.
    12740  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12741  {
    12742  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12743  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12744  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12745  VMA_ASSERT(pDedicatedAllocVector);
    12746  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12747  {
    12748  VmaStatInfo allocationStatInfo;
    12749  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12750  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12751  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12752  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12753  }
    12754  }
    12755 
    12756  // Postprocess.
    12757  VmaPostprocessCalcStatInfo(pStats->total);
    12758  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12759  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12760  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12761  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12762 }
    12763 
// 4098 == 0x1002, the PCI vendor ID of AMD. Not referenced in this chunk -
// presumably used elsewhere for vendor-specific behavior; verify at call site.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12765 
// Defragments the given allocations, limited by pDefragmentationInfo
// (maxBytesToMove / maxAllocationsToMove; unlimited when null).
// Phases:
//  1. Dispatch eligible allocations to per-block-vector defragmentators
//     (created on demand). Dedicated, lost, non-HOST_VISIBLE|HOST_COHERENT,
//     and linear/buddy-pool allocations are skipped.
//  2. Run defragmentation on default memory types, then on custom pools,
//     stopping at the first failure.
//  3. Destroy all defragmentators (reverse order of creation).
// pAllocationsChanged, if provided, receives VK_TRUE per moved allocation;
// pDefragmentationStats, if provided, is filled with totals.
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    // Zero the optional output arrays up front.
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // Held for the entire operation so the pool list cannot change underneath us.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12886 
// Fills *pAllocationInfo with the current state of hAllocation.
// For allocations that can become lost, this also counts as a "use": the
// last-use frame index is advanced to the current frame through a lock-free
// compare-exchange loop. A lost allocation reports null memory/offset but
// keeps its size and user data.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation was lost: report empty memory but preserve
                // size and user data so the caller can identify it.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already marked as used this frame: report live info.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump the last-use frame; on CAS failure another
                // thread changed it and the loop re-evaluates the new value.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats builds also advance the last-use frame for non-lost-capable
        // allocations so usage shows up in statistics.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12958 
// Returns false if hAllocation has been lost, true otherwise. As a side
// effect, marks a live allocation as used in the current frame by advancing
// its last-use frame index via the same lock-free CAS loop as
// GetAllocationInfo (of which this is a stripped-down version).
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // CAS failure means another thread updated the index; loop
                // re-evaluates with the freshly read value.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats builds track usage of non-lost-capable allocations too.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // A non-lost-capable allocation is always considered alive.
        return true;
    }
}
    13010 
    13011 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13012 {
    13013  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13014 
    13015  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13016 
    13017  if(newCreateInfo.maxBlockCount == 0)
    13018  {
    13019  newCreateInfo.maxBlockCount = SIZE_MAX;
    13020  }
    13021  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13022  {
    13023  return VK_ERROR_INITIALIZATION_FAILED;
    13024  }
    13025 
    13026  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13027 
    13028  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13029 
    13030  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13031  if(res != VK_SUCCESS)
    13032  {
    13033  vma_delete(this, *pPool);
    13034  *pPool = VMA_NULL;
    13035  return res;
    13036  }
    13037 
    13038  // Add to m_Pools.
    13039  {
    13040  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13041  (*pPool)->SetId(m_NextPoolId++);
    13042  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13043  }
    13044 
    13045  return VK_SUCCESS;
    13046 }
    13047 
    13048 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13049 {
    13050  // Remove from m_Pools.
    13051  {
    13052  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13053  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13054  VMA_ASSERT(success && "Pool not found in Allocator.");
    13055  }
    13056 
    13057  vma_delete(this, pool);
    13058 }
    13059 
// Fills *pPoolStats with statistics of the given custom pool by delegating
// to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13064 
// Atomically publishes the new current frame index, used by the
// lost-allocation machinery to time-stamp allocations.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    13069 
// Marks eligible allocations in the pool as lost, judged against the current
// frame index. Writes the number of allocations lost to *pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    13078 
// Runs corruption detection over one custom pool's blocks.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    13083 
    13084 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13085 {
    13086  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13087 
    13088  // Process default pools.
    13089  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13090  {
    13091  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13092  {
    13093  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13094  VMA_ASSERT(pBlockVector);
    13095  VkResult localRes = pBlockVector->CheckCorruption();
    13096  switch(localRes)
    13097  {
    13098  case VK_ERROR_FEATURE_NOT_PRESENT:
    13099  break;
    13100  case VK_SUCCESS:
    13101  finalRes = VK_SUCCESS;
    13102  break;
    13103  default:
    13104  return localRes;
    13105  }
    13106  }
    13107  }
    13108 
    13109  // Process custom pools.
    13110  {
    13111  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13112  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13113  {
    13114  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13115  {
    13116  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13117  switch(localRes)
    13118  {
    13119  case VK_ERROR_FEATURE_NOT_PRESENT:
    13120  break;
    13121  case VK_SUCCESS:
    13122  finalRes = VK_SUCCESS;
    13123  break;
    13124  default:
    13125  return localRes;
    13126  }
    13127  }
    13128  }
    13129  }
    13130 
    13131  return finalRes;
    13132 }
    13133 
// Creates a dummy allocation that is permanently in the "lost" state,
// usable as a placeholder handle.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    13139 
// Calls vkAllocateMemory, enforcing the optional per-heap size limit
// (m_HeapSizeLimit) and notifying the user's allocate callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // A limit is configured for this heap: check and charge the remaining
        // budget under the mutex so concurrent allocations cannot oversubscribe.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Would exceed the artificial limit - fail without calling Vulkan.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        // VK_WHOLE_SIZE means "no limit" for this heap.
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Inform the user callback about the new device memory block.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    13173 
// Frees device memory obtained through AllocateVulkanMemory, notifying the
// user's free callback and returning the size to the heap's budget if a
// per-heap limit is configured.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Notify the user callback before the memory is actually released.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Give the freed size back to the heap's remaining budget.
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    13190 
    13191 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    13192 {
    13193  if(hAllocation->CanBecomeLost())
    13194  {
    13195  return VK_ERROR_MEMORY_MAP_FAILED;
    13196  }
    13197 
    13198  switch(hAllocation->GetType())
    13199  {
    13200  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13201  {
    13202  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13203  char *pBytes = VMA_NULL;
    13204  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    13205  if(res == VK_SUCCESS)
    13206  {
    13207  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    13208  hAllocation->BlockAllocMap();
    13209  }
    13210  return res;
    13211  }
    13212  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13213  return hAllocation->DedicatedAllocMap(this, ppData);
    13214  default:
    13215  VMA_ASSERT(0);
    13216  return VK_ERROR_MEMORY_MAP_FAILED;
    13217  }
    13218 }
    13219 
    13220 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13221 {
    13222  switch(hAllocation->GetType())
    13223  {
    13224  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13225  {
    13226  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13227  hAllocation->BlockAllocUnmap();
    13228  pBlock->Unmap(this, 1);
    13229  }
    13230  break;
    13231  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13232  hAllocation->DedicatedAllocUnmap(this);
    13233  break;
    13234  default:
    13235  VMA_ASSERT(0);
    13236  }
    13237 }
    13238 
// Binds hBuffer to the allocation's device memory. Dedicated allocations are
// bound at offset 0; block allocations are bound through the owning block,
// which applies the allocation's offset within the block.
VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
{
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        res = GetVulkanFunctions().vkBindBufferMemory(
            m_hDevice,
            hBuffer,
            hAllocation->GetMemory(),
            0); //memoryOffset
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
        VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
        res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
        break;
    }
    default:
        VMA_ASSERT(0);
    }
    return res;
}
    13263 
// Binds hImage to the allocation's device memory. Mirrors BindBufferMemory:
// dedicated allocations bind at offset 0, block allocations go through the
// owning block.
VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
{
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        res = GetVulkanFunctions().vkBindImageMemory(
            m_hDevice,
            hImage,
            hAllocation->GetMemory(),
            0); //memoryOffset
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    {
        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
        VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
        res = pBlock->BindImageMemory(this, hAllocation, hImage);
        break;
    }
    default:
        VMA_ASSERT(0);
    }
    return res;
}
    13288 
// Flushes or invalidates a sub-range of the allocation's mapped memory.
// No-op for coherent memory types or when size == 0. The range is expanded
// to nonCoherentAtomSize alignment as required by the Vulkan spec, then
// clamped so it never extends past the allocation's block.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Offsets within a dedicated allocation are offsets within its
            // whole VkDeviceMemory: just align down/up to the atom size.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Align the end up, but never past the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            // Clamp so the aligned-up end never extends past the block.
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13364 
    13365 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13366 {
    13367  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13368 
    13369  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13370  {
    13371  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13372  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13373  VMA_ASSERT(pDedicatedAllocations);
    13374  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13375  VMA_ASSERT(success);
    13376  }
    13377 
    13378  VkDeviceMemory hMemory = allocation->GetMemory();
    13379 
    13380  /*
    13381  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13382  before vkFreeMemory.
    13383 
    13384  if(allocation->GetMappedData() != VMA_NULL)
    13385  {
    13386  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13387  }
    13388  */
    13389 
    13390  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13391 
    13392  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13393 }
    13394 
// Fills the allocation's memory with the given byte pattern. Used when
// VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled. Only non-lost, host-visible
// allocations are filled; anything else is silently skipped.
void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
{
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
        !hAllocation->CanBecomeLost() &&
        (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        void* pData = VMA_NULL;
        VkResult res = Map(hAllocation, &pData);
        if(res == VK_SUCCESS)
        {
            memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
            // Flush so the pattern is visible even on non-coherent memory.
            FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
            Unmap(hAllocation);
        }
        else
        {
            VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
        }
    }
}
    13415 
    13416 #if VMA_STATS_STRING_ENABLED
    13417 
// Writes the detailed memory map into the JSON writer: dedicated allocations
// per memory type, then non-empty default pools, then custom pools.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // "DedicatedAllocations" object is opened lazily, only if at least one
    // memory type actually has dedicated allocations.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // "DefaultPools" object, also opened lazily for non-empty block vectors.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Pools are keyed by their unique id.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13503 
    13504 #endif // #if VMA_STATS_STRING_ENABLED
    13505 
    13507 // Public interface
    13508 
// Public entry point: constructs the allocator object with the user's
// allocation callbacks and runs its initialization.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13518 
    13519 void vmaDestroyAllocator(
    13520  VmaAllocator allocator)
    13521 {
    13522  if(allocator != VK_NULL_HANDLE)
    13523  {
    13524  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13525  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13526  vma_delete(&allocationCallbacks, allocator);
    13527  }
    13528 }
    13529 
    13531  VmaAllocator allocator,
    13532  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13533 {
    13534  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13535  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13536 }
    13537 
    13539  VmaAllocator allocator,
    13540  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13541 {
    13542  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13543  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13544 }
    13545 
    13547  VmaAllocator allocator,
    13548  uint32_t memoryTypeIndex,
    13549  VkMemoryPropertyFlags* pFlags)
    13550 {
    13551  VMA_ASSERT(allocator && pFlags);
    13552  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13553  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13554 }
    13555 
    13557  VmaAllocator allocator,
    13558  uint32_t frameIndex)
    13559 {
    13560  VMA_ASSERT(allocator);
    13561  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13562 
    13563  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13564 
    13565  allocator->SetCurrentFrameIndex(frameIndex);
    13566 }
    13567 
// Public entry point: computes aggregate statistics over all allocator memory.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13576 
    13577 #if VMA_STATS_STRING_ENABLED
    13578 
// Builds a JSON statistics report into a newly allocated, null-terminated
// string. The caller must release it with vmaFreeStatsString(). When
// detailedMap is VK_TRUE the full per-allocation map is appended.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        // Grand total over all heaps and types.
        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        // One object per memory heap, with its memory types nested inside.
        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only when the heap actually holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                // Only memory types belonging to this heap.
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Spell out the property flags as readable strings.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's buffer into a caller-owned, null-terminated string.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13686 
    13687 void vmaFreeStatsString(
    13688  VmaAllocator allocator,
    13689  char* pStatsString)
    13690 {
    13691  if(pStatsString != VMA_NULL)
    13692  {
    13693  VMA_ASSERT(allocator);
    13694  size_t len = strlen(pStatsString);
    13695  vma_delete_array(allocator, pStatsString, len + 1);
    13696  }
    13697 }
    13698 
    13699 #endif // #if VMA_STATS_STRING_ENABLED
    13700 
    13701 /*
    13702 This function is not protected by any mutex because it just reads immutable data.
    13703 */
    13704 VkResult vmaFindMemoryTypeIndex(
    13705  VmaAllocator allocator,
    13706  uint32_t memoryTypeBits,
    13707  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13708  uint32_t* pMemoryTypeIndex)
    13709 {
    13710  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13711  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13712  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13713 
    13714  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13715  {
    13716  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13717  }
    13718 
    13719  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13720  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13721 
    13722  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13723  if(mapped)
    13724  {
    13725  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13726  }
    13727 
    13728  // Convert usage to requiredFlags and preferredFlags.
    13729  switch(pAllocationCreateInfo->usage)
    13730  {
    13732  break;
    13734  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13735  {
    13736  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13737  }
    13738  break;
    13740  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13741  break;
    13743  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13744  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13745  {
    13746  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13747  }
    13748  break;
    13750  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13751  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13752  break;
    13753  default:
    13754  break;
    13755  }
    13756 
    13757  *pMemoryTypeIndex = UINT32_MAX;
    13758  uint32_t minCost = UINT32_MAX;
    13759  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13760  memTypeIndex < allocator->GetMemoryTypeCount();
    13761  ++memTypeIndex, memTypeBit <<= 1)
    13762  {
    13763  // This memory type is acceptable according to memoryTypeBits bitmask.
    13764  if((memTypeBit & memoryTypeBits) != 0)
    13765  {
    13766  const VkMemoryPropertyFlags currFlags =
    13767  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13768  // This memory type contains requiredFlags.
    13769  if((requiredFlags & ~currFlags) == 0)
    13770  {
    13771  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13772  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13773  // Remember memory type with lowest cost.
    13774  if(currCost < minCost)
    13775  {
    13776  *pMemoryTypeIndex = memTypeIndex;
    13777  if(currCost == 0)
    13778  {
    13779  return VK_SUCCESS;
    13780  }
    13781  minCost = currCost;
    13782  }
    13783  }
    13784  }
    13785  }
    13786  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13787 }
    13788 
    13790  VmaAllocator allocator,
    13791  const VkBufferCreateInfo* pBufferCreateInfo,
    13792  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13793  uint32_t* pMemoryTypeIndex)
    13794 {
    13795  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13796  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13797  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13798  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13799 
    13800  const VkDevice hDev = allocator->m_hDevice;
    13801  VkBuffer hBuffer = VK_NULL_HANDLE;
    13802  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13803  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13804  if(res == VK_SUCCESS)
    13805  {
    13806  VkMemoryRequirements memReq = {};
    13807  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13808  hDev, hBuffer, &memReq);
    13809 
    13810  res = vmaFindMemoryTypeIndex(
    13811  allocator,
    13812  memReq.memoryTypeBits,
    13813  pAllocationCreateInfo,
    13814  pMemoryTypeIndex);
    13815 
    13816  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13817  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13818  }
    13819  return res;
    13820 }
    13821 
    13823  VmaAllocator allocator,
    13824  const VkImageCreateInfo* pImageCreateInfo,
    13825  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13826  uint32_t* pMemoryTypeIndex)
    13827 {
    13828  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13829  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13830  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13831  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13832 
    13833  const VkDevice hDev = allocator->m_hDevice;
    13834  VkImage hImage = VK_NULL_HANDLE;
    13835  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13836  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13837  if(res == VK_SUCCESS)
    13838  {
    13839  VkMemoryRequirements memReq = {};
    13840  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13841  hDev, hImage, &memReq);
    13842 
    13843  res = vmaFindMemoryTypeIndex(
    13844  allocator,
    13845  memReq.memoryTypeBits,
    13846  pAllocationCreateInfo,
    13847  pMemoryTypeIndex);
    13848 
    13849  allocator->GetVulkanFunctions().vkDestroyImage(
    13850  hDev, hImage, allocator->GetAllocationCallbacks());
    13851  }
    13852  return res;
    13853 }
    13854 
// Public entry point: creates a custom memory pool and, when recording is
// enabled, records the call for later replay.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
    13877 
// Public entry point: destroys a custom memory pool. Passing VK_NULL_HANDLE
// is a no-op. Recording happens before destruction so the pool handle is
// still valid when recorded.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
    13902 
/// Retrieves statistics of an existing pool into *pPoolStats.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
    13914 
    13916  VmaAllocator allocator,
    13917  VmaPool pool,
    13918  size_t* pLostAllocationCount)
    13919 {
    13920  VMA_ASSERT(allocator && pool);
    13921 
    13922  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13923 
    13924 #if VMA_RECORDING_ENABLED
    13925  if(allocator->GetRecorder() != VMA_NULL)
    13926  {
    13927  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13928  }
    13929 #endif
    13930 
    13931  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13932 }
    13933 
/// Checks magic-number margins of all allocations in the given pool for
/// corruption; returns the result of the allocator's corruption check.
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}
    13944 
/// General-purpose memory allocation from explicit VkMemoryRequirements,
/// not tied to any particular buffer or image.
/// @param pAllocationInfo Optional; filled only on success.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // No resource handle is available here, so dedicated-allocation hints are
    // all false/null and the suballocation type is unknown.
    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13986 
    13988  VmaAllocator allocator,
    13989  VkBuffer buffer,
    13990  const VmaAllocationCreateInfo* pCreateInfo,
    13991  VmaAllocation* pAllocation,
    13992  VmaAllocationInfo* pAllocationInfo)
    13993 {
    13994  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13995 
    13996  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    13997 
    13998  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13999 
    14000  VkMemoryRequirements vkMemReq = {};
    14001  bool requiresDedicatedAllocation = false;
    14002  bool prefersDedicatedAllocation = false;
    14003  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14004  requiresDedicatedAllocation,
    14005  prefersDedicatedAllocation);
    14006 
    14007  VkResult result = allocator->AllocateMemory(
    14008  vkMemReq,
    14009  requiresDedicatedAllocation,
    14010  prefersDedicatedAllocation,
    14011  buffer, // dedicatedBuffer
    14012  VK_NULL_HANDLE, // dedicatedImage
    14013  *pCreateInfo,
    14014  VMA_SUBALLOCATION_TYPE_BUFFER,
    14015  pAllocation);
    14016 
    14017 #if VMA_RECORDING_ENABLED
    14018  if(allocator->GetRecorder() != VMA_NULL)
    14019  {
    14020  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14021  allocator->GetCurrentFrameIndex(),
    14022  vkMemReq,
    14023  requiresDedicatedAllocation,
    14024  prefersDedicatedAllocation,
    14025  *pCreateInfo,
    14026  *pAllocation);
    14027  }
    14028 #endif
    14029 
    14030  if(pAllocationInfo && result == VK_SUCCESS)
    14031  {
    14032  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14033  }
    14034 
    14035  return result;
    14036 }
    14037 
/// Allocates memory suitable for the given existing VkImage (binding is the
/// caller's responsibility).
/// @param pAllocationInfo Optional; filled only on success.
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Query requirements plus dedicated-allocation hints for this image.
    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    // Tiling is not known here, so the unknown image suballocation type is used.
    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    14087 
/// Frees memory previously allocated by any of the vmaAllocateMemory* /
/// vmaCreateBuffer / vmaCreateImage entry points.
/// Passing VK_NULL_HANDLE is a valid no-op.
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    // Null allocation is silently ignored.
    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before freeing, while the handle is still valid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->FreeMemory(allocation);
}
    14114 
/// Attempts to resize an existing allocation in place to newSize bytes.
/// Returns the result of the allocator's resize operation.
VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaResizeAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // NOTE: the call is recorded before it executes, regardless of its result.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordResizeAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation,
            newSize);
    }
#endif

    return allocator->ResizeAllocation(allocation, newSize);
}
    14138 
    14140  VmaAllocator allocator,
    14141  VmaAllocation allocation,
    14142  VmaAllocationInfo* pAllocationInfo)
    14143 {
    14144  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14145 
    14146  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14147 
    14148 #if VMA_RECORDING_ENABLED
    14149  if(allocator->GetRecorder() != VMA_NULL)
    14150  {
    14151  allocator->GetRecorder()->RecordGetAllocationInfo(
    14152  allocator->GetCurrentFrameIndex(),
    14153  allocation);
    14154  }
    14155 #endif
    14156 
    14157  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14158 }
    14159 
/// Touches the allocation (updates its last-use frame index) and returns
/// whether it is not lost, as reported by the allocator object.
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordTouchAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return allocator->TouchAllocation(allocation);
}
    14179 
    14181  VmaAllocator allocator,
    14182  VmaAllocation allocation,
    14183  void* pUserData)
    14184 {
    14185  VMA_ASSERT(allocator && allocation);
    14186 
    14187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14188 
    14189  allocation->SetUserData(allocator, pUserData);
    14190 
    14191 #if VMA_RECORDING_ENABLED
    14192  if(allocator->GetRecorder() != VMA_NULL)
    14193  {
    14194  allocator->GetRecorder()->RecordSetAllocationUserData(
    14195  allocator->GetCurrentFrameIndex(),
    14196  allocation,
    14197  pUserData);
    14198  }
    14199 #endif
    14200 }
    14201 
    14203  VmaAllocator allocator,
    14204  VmaAllocation* pAllocation)
    14205 {
    14206  VMA_ASSERT(allocator && pAllocation);
    14207 
    14208  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14209 
    14210  allocator->CreateLostAllocation(pAllocation);
    14211 
    14212 #if VMA_RECORDING_ENABLED
    14213  if(allocator->GetRecorder() != VMA_NULL)
    14214  {
    14215  allocator->GetRecorder()->RecordCreateLostAllocation(
    14216  allocator->GetCurrentFrameIndex(),
    14217  *pAllocation);
    14218  }
    14219 #endif
    14220 }
    14221 
/// Maps the allocation's memory and returns the pointer in *ppData.
/// Delegates to the allocator's Map; must be paired with vmaUnmapMemory.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->Map(allocation, ppData);

#if VMA_RECORDING_ENABLED
    // Recorded even if Map failed; only the allocation handle is logged.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
    14244 
/// Unmaps memory previously mapped with vmaMapMemory.
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before the actual unmap.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordUnmapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->Unmap(allocation);
}
    14264 
/// Flushes the given byte range [offset, offset+size) of the allocation's
/// mapped memory (host writes -> device visibility for non-coherent memory).
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Shared implementation with vmaInvalidateAllocation, selected by cache op.
    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFlushAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    14284 
/// Invalidates the given byte range [offset, offset+size) of the allocation's
/// mapped memory (device writes -> host visibility for non-coherent memory).
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Shared implementation with vmaFlushAllocation, selected by cache op.
    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordInvalidateAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    14304 
/// Checks magic-number margins in all memory types selected by memoryTypeBits
/// for corruption; returns the allocator's verdict.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaCheckCorruption");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CheckCorruption(memoryTypeBits);
}
    14315 
/// Compacts memory by moving the given allocations.
/// @param pAllocationsChanged Optional per-allocation "was moved" flags.
/// @param pDefragmentationInfo Optional limits; null means defaults.
/// @param pDefragmentationStats Optional output statistics.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    VMA_ASSERT(allocator && pAllocations);

    VMA_DEBUG_LOG("vmaDefragment");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
}
    14332 
/// Binds the given buffer to the allocation's memory at the correct offset.
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, buffer);
}
    14346 
/// Binds the given image to the allocation's memory at the correct offset.
VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, image);
}
    14360 
/// Creates a VkBuffer, allocates memory for it, and binds them together.
/// On any failure the partially created objects are destroyed and both output
/// handles are reset to VK_NULL_HANDLE before returning the error code.
/// @param pAllocationInfo Optional; filled only on full success.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    // Zero-size buffers are rejected up front.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Make sure alignment requirements for specific buffer usages reported
        // in Physical Device Properties are included in alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the buffer.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
    14467 
/// Destroys a buffer and frees its allocation, in that order.
/// Either argument may be VK_NULL_HANDLE; both null is a valid no-op.
void vmaDestroyBuffer(
    VmaAllocator allocator,
    VkBuffer buffer,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before destruction; only the allocation handle is logged.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyBuffer(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    if(buffer != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    }

    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(allocation);
    }
}
    14503 
/// Creates a VkImage, allocates memory for it, and binds them together.
/// On any failure the partially created objects are destroyed and both output
/// handles are reset to VK_NULL_HANDLE before returning the error code.
/// @param pAllocationInfo Optional; filled only on full success.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    // Degenerate images (any zero dimension/mip/layer count) are rejected up front.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Linear vs optimal tiling matters for suballocation bookkeeping.
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            pAllocation);

#if VMA_RECORDING_ENABLED
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = allocator->BindImageMemory(*pAllocation, *pImage);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the image.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the image.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    return res;
}
    14599 
/// Destroys an image and frees its allocation, in that order.
/// Either argument may be VK_NULL_HANDLE; both null is a valid no-op.
void vmaDestroyImage(
    VmaAllocator allocator,
    VkImage image,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before destruction; only the allocation handle is logged.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyImage(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    if(image != VK_NULL_HANDLE)
    {
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    }
    if(allocation != VK_NULL_HANDLE)
    {
        allocator->FreeMemory(allocation);
    }
}
    14634 
    14635 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1586
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1479 /*
    1480 Define this macro to 0/1 to disable/enable support for recording functionality,
    1481 available through VmaAllocatorCreateInfo::pRecordSettings.
    1482 */
    1483 #ifndef VMA_RECORDING_ENABLED
    1484  #ifdef _WIN32
    1485  #define VMA_RECORDING_ENABLED 1
    1486  #else
    1487  #define VMA_RECORDING_ENABLED 0
    1488  #endif
    1489 #endif
    1490 
    1491 #ifndef NOMINMAX
    1492  #define NOMINMAX // For windows.h
    1493 #endif
    1494 
    1495 #include <vulkan/vulkan.h>
    1496 
    1497 #if VMA_RECORDING_ENABLED
    1498  #include <windows.h>
    1499 #endif
    1500 
    1501 #if !defined(VMA_DEDICATED_ALLOCATION)
    1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1503  #define VMA_DEDICATED_ALLOCATION 1
    1504  #else
    1505  #define VMA_DEDICATED_ALLOCATION 0
    1506  #endif
    1507 #endif
    1508 
    1518 VK_DEFINE_HANDLE(VmaAllocator)
    1519 
    1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1522  VmaAllocator allocator,
    1523  uint32_t memoryType,
    1524  VkDeviceMemory memory,
    1525  VkDeviceSize size);
    1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1528  VmaAllocator allocator,
    1529  uint32_t memoryType,
    1530  VkDeviceMemory memory,
    1531  VkDeviceSize size);
    1532 
    1546 
    1576 
    1579 typedef VkFlags VmaAllocatorCreateFlags;
    1580 
    1585 typedef struct VmaVulkanFunctions {
    1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1588  PFN_vkAllocateMemory vkAllocateMemory;
    1589  PFN_vkFreeMemory vkFreeMemory;
    1590  PFN_vkMapMemory vkMapMemory;
    1591  PFN_vkUnmapMemory vkUnmapMemory;
    1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1594  PFN_vkBindBufferMemory vkBindBufferMemory;
    1595  PFN_vkBindImageMemory vkBindImageMemory;
    1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1598  PFN_vkCreateBuffer vkCreateBuffer;
    1599  PFN_vkDestroyBuffer vkDestroyBuffer;
    1600  PFN_vkCreateImage vkCreateImage;
    1601  PFN_vkDestroyImage vkDestroyImage;
    1602 #if VMA_DEDICATED_ALLOCATION
    1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1605 #endif
    1607 
    1609 typedef enum VmaRecordFlagBits {
    1616 
    1619 typedef VkFlags VmaRecordFlags;
    1620 
    1622 typedef struct VmaRecordSettings
    1623 {
    1633  const char* pFilePath;
    1635 
    1638 {
    1642 
    1643  VkPhysicalDevice physicalDevice;
    1645 
    1646  VkDevice device;
    1648 
    1651 
    1652  const VkAllocationCallbacks* pAllocationCallbacks;
    1654 
    1693  const VkDeviceSize* pHeapSizeLimit;
    1714 
    1716 VkResult vmaCreateAllocator(
    1717  const VmaAllocatorCreateInfo* pCreateInfo,
    1718  VmaAllocator* pAllocator);
    1719 
    1721 void vmaDestroyAllocator(
    1722  VmaAllocator allocator);
    1723 
    1729  VmaAllocator allocator,
    1730  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1731 
    1737  VmaAllocator allocator,
    1738  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1739 
    1747  VmaAllocator allocator,
    1748  uint32_t memoryTypeIndex,
    1749  VkMemoryPropertyFlags* pFlags);
    1750 
    1760  VmaAllocator allocator,
    1761  uint32_t frameIndex);
    1762 
    1765 typedef struct VmaStatInfo
    1766 {
    1768  uint32_t blockCount;
    1774  VkDeviceSize usedBytes;
    1776  VkDeviceSize unusedBytes;
    1779 } VmaStatInfo;
    1780 
    1782 typedef struct VmaStats
    1783 {
    1784  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1785  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1787 } VmaStats;
    1788 
    1790 void vmaCalculateStats(
    1791  VmaAllocator allocator,
    1792  VmaStats* pStats);
    1793 
    1794 #define VMA_STATS_STRING_ENABLED 1
    1795 
    1796 #if VMA_STATS_STRING_ENABLED
    1797 
    1799 
    1801 void vmaBuildStatsString(
    1802  VmaAllocator allocator,
    1803  char** ppStatsString,
    1804  VkBool32 detailedMap);
    1805 
    1806 void vmaFreeStatsString(
    1807  VmaAllocator allocator,
    1808  char* pStatsString);
    1809 
    1810 #endif // #if VMA_STATS_STRING_ENABLED
    1811 
    1820 VK_DEFINE_HANDLE(VmaPool)
    1821 
    1822 typedef enum VmaMemoryUsage
    1823 {
    1872 } VmaMemoryUsage;
    1873 
    1888 
    1943 
    1959 
    1969 
    1976 
    1980 
    1982 {
    1995  VkMemoryPropertyFlags requiredFlags;
    2000  VkMemoryPropertyFlags preferredFlags;
    2008  uint32_t memoryTypeBits;
    2021  void* pUserData;
    2023 
    2040 VkResult vmaFindMemoryTypeIndex(
    2041  VmaAllocator allocator,
    2042  uint32_t memoryTypeBits,
    2043  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2044  uint32_t* pMemoryTypeIndex);
    2045 
    2059  VmaAllocator allocator,
    2060  const VkBufferCreateInfo* pBufferCreateInfo,
    2061  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2062  uint32_t* pMemoryTypeIndex);
    2063 
    2077  VmaAllocator allocator,
    2078  const VkImageCreateInfo* pImageCreateInfo,
    2079  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2080  uint32_t* pMemoryTypeIndex);
    2081 
    2102 
    2119 
    2130 
    2136 
    2139 typedef VkFlags VmaPoolCreateFlags;
    2140 
    2143 typedef struct VmaPoolCreateInfo {
    2158  VkDeviceSize blockSize;
    2187 
    2190 typedef struct VmaPoolStats {
    2193  VkDeviceSize size;
    2196  VkDeviceSize unusedSize;
    2209  VkDeviceSize unusedRangeSizeMax;
    2212  size_t blockCount;
    2213 } VmaPoolStats;
    2214 
    2221 VkResult vmaCreatePool(
    2222  VmaAllocator allocator,
    2223  const VmaPoolCreateInfo* pCreateInfo,
    2224  VmaPool* pPool);
    2225 
    2228 void vmaDestroyPool(
    2229  VmaAllocator allocator,
    2230  VmaPool pool);
    2231 
    2238 void vmaGetPoolStats(
    2239  VmaAllocator allocator,
    2240  VmaPool pool,
    2241  VmaPoolStats* pPoolStats);
    2242 
    2250  VmaAllocator allocator,
    2251  VmaPool pool,
    2252  size_t* pLostAllocationCount);
    2253 
    2268 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2269 
    2294 VK_DEFINE_HANDLE(VmaAllocation)
    2295 
    2296 
    2298 typedef struct VmaAllocationInfo {
    2303  uint32_t memoryType;
    2312  VkDeviceMemory deviceMemory;
    2317  VkDeviceSize offset;
    2322  VkDeviceSize size;
    2336  void* pUserData;
    2338 
    2349 VkResult vmaAllocateMemory(
    2350  VmaAllocator allocator,
    2351  const VkMemoryRequirements* pVkMemoryRequirements,
    2352  const VmaAllocationCreateInfo* pCreateInfo,
    2353  VmaAllocation* pAllocation,
    2354  VmaAllocationInfo* pAllocationInfo);
    2355 
    2363  VmaAllocator allocator,
    2364  VkBuffer buffer,
    2365  const VmaAllocationCreateInfo* pCreateInfo,
    2366  VmaAllocation* pAllocation,
    2367  VmaAllocationInfo* pAllocationInfo);
    2368 
    2370 VkResult vmaAllocateMemoryForImage(
    2371  VmaAllocator allocator,
    2372  VkImage image,
    2373  const VmaAllocationCreateInfo* pCreateInfo,
    2374  VmaAllocation* pAllocation,
    2375  VmaAllocationInfo* pAllocationInfo);
    2376 
    2378 void vmaFreeMemory(
    2379  VmaAllocator allocator,
    2380  VmaAllocation allocation);
    2381 
    2402 VkResult vmaResizeAllocation(
    2403  VmaAllocator allocator,
    2404  VmaAllocation allocation,
    2405  VkDeviceSize newSize);
    2406 
    2424  VmaAllocator allocator,
    2425  VmaAllocation allocation,
    2426  VmaAllocationInfo* pAllocationInfo);
    2427 
    2442 VkBool32 vmaTouchAllocation(
    2443  VmaAllocator allocator,
    2444  VmaAllocation allocation);
    2445 
    2460  VmaAllocator allocator,
    2461  VmaAllocation allocation,
    2462  void* pUserData);
    2463 
    2475  VmaAllocator allocator,
    2476  VmaAllocation* pAllocation);
    2477 
    2512 VkResult vmaMapMemory(
    2513  VmaAllocator allocator,
    2514  VmaAllocation allocation,
    2515  void** ppData);
    2516 
    2521 void vmaUnmapMemory(
    2522  VmaAllocator allocator,
    2523  VmaAllocation allocation);
    2524 
    2537 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2538 
    2551 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2552 
    2569 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2570 
    2572 typedef struct VmaDefragmentationInfo {
    2577  VkDeviceSize maxBytesToMove;
    2584 
    2586 typedef struct VmaDefragmentationStats {
    2588  VkDeviceSize bytesMoved;
    2590  VkDeviceSize bytesFreed;
    2596 
    2635 VkResult vmaDefragment(
    2636  VmaAllocator allocator,
    2637  VmaAllocation* pAllocations,
    2638  size_t allocationCount,
    2639  VkBool32* pAllocationsChanged,
    2640  const VmaDefragmentationInfo *pDefragmentationInfo,
    2641  VmaDefragmentationStats* pDefragmentationStats);
    2642 
    2655 VkResult vmaBindBufferMemory(
    2656  VmaAllocator allocator,
    2657  VmaAllocation allocation,
    2658  VkBuffer buffer);
    2659 
    2672 VkResult vmaBindImageMemory(
    2673  VmaAllocator allocator,
    2674  VmaAllocation allocation,
    2675  VkImage image);
    2676 
    2703 VkResult vmaCreateBuffer(
    2704  VmaAllocator allocator,
    2705  const VkBufferCreateInfo* pBufferCreateInfo,
    2706  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2707  VkBuffer* pBuffer,
    2708  VmaAllocation* pAllocation,
    2709  VmaAllocationInfo* pAllocationInfo);
    2710 
    2722 void vmaDestroyBuffer(
    2723  VmaAllocator allocator,
    2724  VkBuffer buffer,
    2725  VmaAllocation allocation);
    2726 
    2728 VkResult vmaCreateImage(
    2729  VmaAllocator allocator,
    2730  const VkImageCreateInfo* pImageCreateInfo,
    2731  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2732  VkImage* pImage,
    2733  VmaAllocation* pAllocation,
    2734  VmaAllocationInfo* pAllocationInfo);
    2735 
    2747 void vmaDestroyImage(
    2748  VmaAllocator allocator,
    2749  VkImage image,
    2750  VmaAllocation allocation);
    2751 
    2752 #ifdef __cplusplus
    2753 }
    2754 #endif
    2755 
    2756 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2757 
    2758 // For Visual Studio IntelliSense.
    2759 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2760 #define VMA_IMPLEMENTATION
    2761 #endif
    2762 
    2763 #ifdef VMA_IMPLEMENTATION
    2764 #undef VMA_IMPLEMENTATION
    2765 
    2766 #include <cstdint>
    2767 #include <cstdlib>
    2768 #include <cstring>
    2769 
    2770 /*******************************************************************************
    2771 CONFIGURATION SECTION
    2772 
    2773 Define some of these macros before each #include of this header or change them
here if you need behavior other than the default, depending on your environment.
    2775 */
    2776 
    2777 /*
    2778 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2779 internally, like:
    2780 
    2781  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2782 
    2783 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2784 VmaAllocatorCreateInfo::pVulkanFunctions.
    2785 */
    2786 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2787 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2788 #endif
    2789 
    2790 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2791 //#define VMA_USE_STL_CONTAINERS 1
    2792 
    2793 /* Set this macro to 1 to make the library including and using STL containers:
    2794 std::pair, std::vector, std::list, std::unordered_map.
    2795 
    2796 Set it to 0 or undefined to make the library using its own implementation of
    2797 the containers.
    2798 */
    2799 #if VMA_USE_STL_CONTAINERS
    2800  #define VMA_USE_STL_VECTOR 1
    2801  #define VMA_USE_STL_UNORDERED_MAP 1
    2802  #define VMA_USE_STL_LIST 1
    2803 #endif
    2804 
    2805 #if VMA_USE_STL_VECTOR
    2806  #include <vector>
    2807 #endif
    2808 
    2809 #if VMA_USE_STL_UNORDERED_MAP
    2810  #include <unordered_map>
    2811 #endif
    2812 
    2813 #if VMA_USE_STL_LIST
    2814  #include <list>
    2815 #endif
    2816 
    2817 /*
    2818 Following headers are used in this CONFIGURATION section only, so feel free to
    2819 remove them if not needed.
    2820 */
    2821 #include <cassert> // for assert
    2822 #include <algorithm> // for min, max
    2823 #include <mutex> // for std::mutex
    2824 #include <atomic> // for std::atomic
    2825 
    2826 #ifndef VMA_NULL
    2827  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2828  #define VMA_NULL nullptr
    2829 #endif
    2830 
    2831 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2832 #include <cstdlib>
    2833 void *aligned_alloc(size_t alignment, size_t size)
    2834 {
    2835  // alignment must be >= sizeof(void*)
    2836  if(alignment < sizeof(void*))
    2837  {
    2838  alignment = sizeof(void*);
    2839  }
    2840 
    2841  return memalign(alignment, size);
    2842 }
    2843 #elif defined(__APPLE__) || defined(__ANDROID__)
    2844 #include <cstdlib>
    2845 void *aligned_alloc(size_t alignment, size_t size)
    2846 {
    2847  // alignment must be >= sizeof(void*)
    2848  if(alignment < sizeof(void*))
    2849  {
    2850  alignment = sizeof(void*);
    2851  }
    2852 
    2853  void *pointer;
    2854  if(posix_memalign(&pointer, alignment, size) == 0)
    2855  return pointer;
    2856  return VMA_NULL;
    2857 }
    2858 #endif
    2859 
// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:
    2862 
    2863 //#include <malloc.h>
    2864 
    2865 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2866 #ifndef VMA_ASSERT
    2867  #ifdef _DEBUG
    2868  #define VMA_ASSERT(expr) assert(expr)
    2869  #else
    2870  #define VMA_ASSERT(expr)
    2871  #endif
    2872 #endif
    2873 
    2874 // Assert that will be called very often, like inside data structures e.g. operator[].
    2875 // Making it non-empty can make program slow.
    2876 #ifndef VMA_HEAVY_ASSERT
    2877  #ifdef _DEBUG
    2878  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2879  #else
    2880  #define VMA_HEAVY_ASSERT(expr)
    2881  #endif
    2882 #endif
    2883 
    2884 #ifndef VMA_ALIGN_OF
    2885  #define VMA_ALIGN_OF(type) (__alignof(type))
    2886 #endif
    2887 
    2888 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2889  #if defined(_WIN32)
    2890  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2891  #else
    2892  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2893  #endif
    2894 #endif
    2895 
    2896 #ifndef VMA_SYSTEM_FREE
    2897  #if defined(_WIN32)
    2898  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2899  #else
    2900  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2901  #endif
    2902 #endif
    2903 
    2904 #ifndef VMA_MIN
    2905  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2906 #endif
    2907 
    2908 #ifndef VMA_MAX
    2909  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2910 #endif
    2911 
    2912 #ifndef VMA_SWAP
    2913  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2914 #endif
    2915 
    2916 #ifndef VMA_SORT
    2917  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2918 #endif
    2919 
    2920 #ifndef VMA_DEBUG_LOG
    2921  #define VMA_DEBUG_LOG(format, ...)
    2922  /*
    2923  #define VMA_DEBUG_LOG(format, ...) do { \
    2924  printf(format, __VA_ARGS__); \
    2925  printf("\n"); \
    2926  } while(false)
    2927  */
    2928 #endif
    2929 
    2930 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2931 #if VMA_STATS_STRING_ENABLED
    // Writes (num) as decimal text into outStr, truncating to strLen
    // (standard snprintf semantics, including NUL termination).
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    // 64-bit counterpart of VmaUint32ToStr.
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    // Formats the pointer value itself; "%p" output is implementation-defined.
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
    2944 #endif
    2945 
    2946 #ifndef VMA_MUTEX
    // Default mutex wrapper exposing a Lock()/Unlock() interface, backed by
    // std::mutex. Users may substitute their own type by defining VMA_MUTEX.
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
    private:
        std::mutex m_Mutex;
    };
    2957  #define VMA_MUTEX VmaMutex
    2958 #endif
    2959 
    2960 /*
    2961 If providing your own implementation, you need to implement a subset of std::atomic:
    2962 
    2963 - Constructor(uint32_t desired)
    2964 - uint32_t load() const
    2965 - void store(uint32_t desired)
    2966 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2967 */
    2968 #ifndef VMA_ATOMIC_UINT32
    2969  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2970 #endif
    2971 
    2972 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2973 
    2977  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2978 #endif
    2979 
    2980 #ifndef VMA_DEBUG_ALIGNMENT
    2981 
    2985  #define VMA_DEBUG_ALIGNMENT (1)
    2986 #endif
    2987 
    2988 #ifndef VMA_DEBUG_MARGIN
    2989 
    2993  #define VMA_DEBUG_MARGIN (0)
    2994 #endif
    2995 
    2996 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2997 
    3001  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3002 #endif
    3003 
    3004 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3005 
    3010  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3011 #endif
    3012 
    3013 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3014 
    3018  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3019 #endif
    3020 
    3021 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3022 
    3026  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3027 #endif
    3028 
    3029 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3030  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3032 #endif
    3033 
    3034 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3035  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3037 #endif
    3038 
    3039 #ifndef VMA_CLASS_NO_COPY
    3040  #define VMA_CLASS_NO_COPY(className) \
    3041  private: \
    3042  className(const className&) = delete; \
    3043  className& operator=(const className&) = delete;
    3044 #endif
    3045 
    3046 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3047 
    3048 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3049 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3050 
    3051 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3052 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3053 
    3054 /*******************************************************************************
    3055 END OF CONFIGURATION
    3056 */
    3057 
    3058 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3059  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3060 
    3061 // Returns number of bits set to 1 in (v).
    3062 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3063 {
    3064  uint32_t c = v - ((v >> 1) & 0x55555555);
    3065  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3066  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3067  c = ((c >> 8) + c) & 0x00FF00FF;
    3068  c = ((c >> 16) + c) & 0x0000FFFF;
    3069  return c;
    3070 }
    3071 
    3072 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3073 // Use types like uint32_t, uint64_t as T.
    3074 template <typename T>
    3075 static inline T VmaAlignUp(T val, T align)
    3076 {
    3077  return (val + align - 1) / align * align;
    3078 }
// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
// (Comment previously said "VmaAlignUp" — copy-paste typo.)
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    return val / align * align;
}
    3086 
    3087 // Division with mathematical rounding to nearest number.
    3088 template <typename T>
    3089 static inline T VmaRoundDiv(T x, T y)
    3090 {
    3091  return (x + (y / (T)2)) / y;
    3092 }
    3093 
    3094 /*
    3095 Returns true if given number is a power of two.
    3096 T must be unsigned integer number or signed integer but always nonnegative.
    3097 For 0 returns true.
    3098 */
    3099 template <typename T>
    3100 inline bool VmaIsPow2(T x)
    3101 {
    3102  return (x & (x-1)) == 0;
    3103 }
    3104 
    3105 // Returns smallest power of 2 greater or equal to v.
    3106 static inline uint32_t VmaNextPow2(uint32_t v)
    3107 {
    3108  v--;
    3109  v |= v >> 1;
    3110  v |= v >> 2;
    3111  v |= v >> 4;
    3112  v |= v >> 8;
    3113  v |= v >> 16;
    3114  v++;
    3115  return v;
    3116 }
    3117 static inline uint64_t VmaNextPow2(uint64_t v)
    3118 {
    3119  v--;
    3120  v |= v >> 1;
    3121  v |= v >> 2;
    3122  v |= v >> 4;
    3123  v |= v >> 8;
    3124  v |= v >> 16;
    3125  v |= v >> 32;
    3126  v++;
    3127  return v;
    3128 }
    3129 
    3130 // Returns largest power of 2 less or equal to v.
    3131 static inline uint32_t VmaPrevPow2(uint32_t v)
    3132 {
    3133  v |= v >> 1;
    3134  v |= v >> 2;
    3135  v |= v >> 4;
    3136  v |= v >> 8;
    3137  v |= v >> 16;
    3138  v = v ^ (v >> 1);
    3139  return v;
    3140 }
    3141 static inline uint64_t VmaPrevPow2(uint64_t v)
    3142 {
    3143  v |= v >> 1;
    3144  v |= v >> 2;
    3145  v |= v >> 4;
    3146  v |= v >> 8;
    3147  v |= v >> 16;
    3148  v |= v >> 32;
    3149  v = v ^ (v >> 1);
    3150  return v;
    3151 }
    3152 
    3153 static inline bool VmaStrIsEmpty(const char* pStr)
    3154 {
    3155  return pStr == VMA_NULL || *pStr == '\0';
    3156 }
    3157 
// Returns a human-readable name for the given pool-algorithm flag
// (0 = default allocation algorithm).
static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    // NOTE(review): the case labels for the linear and buddy algorithm bits
    // appear to be missing from this extract — as written, the two returns
    // below are unreachable and those algorithms would hit the default
    // branch. Confirm against the full file that each return is preceded by
    // its VMA_POOL_CREATE_*_ALGORITHM_BIT case label.
    return "Linear";
    return "Buddy";
    case 0:
    return "Default";
    default:
    VMA_ASSERT(0);
    return "";
    }
}
    3173 
    3174 #ifndef VMA_SORT
    3175 
    3176 template<typename Iterator, typename Compare>
    3177 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3178 {
    3179  Iterator centerValue = end; --centerValue;
    3180  Iterator insertIndex = beg;
    3181  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3182  {
    3183  if(cmp(*memTypeIndex, *centerValue))
    3184  {
    3185  if(insertIndex != memTypeIndex)
    3186  {
    3187  VMA_SWAP(*memTypeIndex, *insertIndex);
    3188  }
    3189  ++insertIndex;
    3190  }
    3191  }
    3192  if(insertIndex != centerValue)
    3193  {
    3194  VMA_SWAP(*insertIndex, *centerValue);
    3195  }
    3196  return insertIndex;
    3197 }
    3198 
    3199 template<typename Iterator, typename Compare>
    3200 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3201 {
    3202  if(beg < end)
    3203  {
    3204  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3205  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3206  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3207  }
    3208 }
    3209 
    3210 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3211 
    3212 #endif // #ifndef VMA_SORT
    3213 
    3214 /*
    3215 Returns true if two memory blocks occupy overlapping pages.
    3216 ResourceA must be in less memory offset than ResourceB.
    3217 
    3218 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3219 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3220 */
    3221 static inline bool VmaBlocksOnSamePage(
    3222  VkDeviceSize resourceAOffset,
    3223  VkDeviceSize resourceASize,
    3224  VkDeviceSize resourceBOffset,
    3225  VkDeviceSize pageSize)
    3226 {
    3227  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3228  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3229  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3230  VkDeviceSize resourceBStart = resourceBOffset;
    3231  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3232  return resourceAEndPage == resourceBStartPage;
    3233 }
    3234 
// Internal classification of a suballocation's contents. Used by
// VmaIsBufferImageGranularityConflict to decide whether two neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    // Unused region of a memory block.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // Contents not known — treated conservatively.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Image whose tiling (linear vs optimal) is not known.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
    3245 
    3246 /*
    3247 Returns true if given suballocation types could conflict and must respect
    3248 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3249 or linear image and another one is optimal image. If type is unknown, behave
    3250 conservatively.
    3251 */
    3252 static inline bool VmaIsBufferImageGranularityConflict(
    3253  VmaSuballocationType suballocType1,
    3254  VmaSuballocationType suballocType2)
    3255 {
    3256  if(suballocType1 > suballocType2)
    3257  {
    3258  VMA_SWAP(suballocType1, suballocType2);
    3259  }
    3260 
    3261  switch(suballocType1)
    3262  {
    3263  case VMA_SUBALLOCATION_TYPE_FREE:
    3264  return false;
    3265  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3266  return true;
    3267  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3268  return
    3269  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3271  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3272  return
    3273  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3274  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3276  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3277  return
    3278  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3279  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3280  return false;
    3281  default:
    3282  VMA_ASSERT(0);
    3283  return true;
    3284  }
    3285 }
    3286 
    3287 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3288 {
    3289  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3290  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3291  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3292  {
    3293  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3294  }
    3295 }
    3296 
    3297 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3298 {
    3299  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3300  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3301  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3302  {
    3303  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3304  {
    3305  return false;
    3306  }
    3307  }
    3308  return true;
    3309 }
    3310 
// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    // When useMutex is false the lock is a no-op (m_pMutex stays null),
    // letting callers disable synchronization without branching at call sites.
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    {
        if(m_pMutex)
        {
            m_pMutex->Lock();
        }
    }

    ~VmaMutexLock()
    {
        if(m_pMutex)
        {
            m_pMutex->Unlock();
        }
    }

private:
    // Null when synchronization is disabled.
    VMA_MUTEX* m_pMutex;
};
    3336 
    3337 #if VMA_DEBUG_GLOBAL_MUTEX
    3338  static VMA_MUTEX gDebugGlobalMutex;
    3339  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3340 #else
    3341  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3342 #endif
    3343 
    3344 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3345 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3346 
    3347 /*
    3348 Performs binary search and returns iterator to first element that is greater or
    3349 equal to (key), according to comparison (cmp).
    3350 
    3351 Cmp should return true if first argument is less than second argument.
    3352 
    3353 Returned value is the found element, if present in the collection or place where
    3354 new element with value (key) should be inserted.
    3355 */
    3356 template <typename CmpLess, typename IterT, typename KeyT>
    3357 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3358 {
    3359  size_t down = 0, up = (end - beg);
    3360  while(down < up)
    3361  {
    3362  const size_t mid = (down + up) / 2;
    3363  if(cmp(*(beg+mid), key))
    3364  {
    3365  down = mid + 1;
    3366  }
    3367  else
    3368  {
    3369  up = mid;
    3370  }
    3371  }
    3372  return beg + down;
    3373 }
    3374 
    3376 // Memory allocation
    3377 
    3378 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3379 {
    3380  if((pAllocationCallbacks != VMA_NULL) &&
    3381  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3382  {
    3383  return (*pAllocationCallbacks->pfnAllocation)(
    3384  pAllocationCallbacks->pUserData,
    3385  size,
    3386  alignment,
    3387  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3388  }
    3389  else
    3390  {
    3391  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3392  }
    3393 }
    3394 
    3395 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3396 {
    3397  if((pAllocationCallbacks != VMA_NULL) &&
    3398  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3399  {
    3400  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3401  }
    3402  else
    3403  {
    3404  VMA_SYSTEM_FREE(ptr);
    3405  }
    3406 }
    3407 
// Allocates raw, correctly aligned storage for a single T through the
// allocation callbacks. Does NOT run T's constructor — pair with vma_new.
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
    3413 
// Allocates raw, correctly aligned storage for (count) objects of type T.
// Does NOT run constructors — pair with vma_new_array.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
    3419 
// Placement-new helpers: allocate raw storage via the callbacks, then
// construct in place. Counterparts are vma_delete / vma_delete_array below.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3423 
    3424 template<typename T>
    3425 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3426 {
    3427  ptr->~T();
    3428  VmaFree(pAllocationCallbacks, ptr);
    3429 }
    3430 
    3431 template<typename T>
    3432 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3433 {
    3434  if(ptr != VMA_NULL)
    3435  {
    3436  for(size_t i = count; i--; )
    3437  {
    3438  ptr[i].~T();
    3439  }
    3440  VmaFree(pAllocationCallbacks, ptr);
    3441  }
    3442 }
    3443 
// STL-compatible allocator.
// Routes all allocations through VkAllocationCallbacks so that STL-style
// containers used internally honor the user's allocation callbacks.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding converting constructor required by the Allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators compare equal iff they use the same callbacks, so memory
    // allocated by one can be deallocated by the other.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3471 
    3472 #if VMA_USE_STL_VECTOR
    3473 
    3474 #define VmaVector std::vector
    3475 
    3476 template<typename T, typename allocatorT>
    3477 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3478 {
    3479  vec.insert(vec.begin() + index, item);
    3480 }
    3481 
    3482 template<typename T, typename allocatorT>
    3483 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3484 {
    3485  vec.erase(vec.begin() + index);
    3486 }
    3487 
    3488 #else // #if VMA_USE_STL_VECTOR
    3489 
    3490 /* Class with interface compatible with subset of std::vector.
    3491 T must be POD because constructors and destructors are not called and memcpy is
    3492 used for these objects. */
    3493 template<typename T, typename AllocatorT>
    3494 class VmaVector
    3495 {
    3496 public:
    3497  typedef T value_type;
    3498 
    3499  VmaVector(const AllocatorT& allocator) :
    3500  m_Allocator(allocator),
    3501  m_pArray(VMA_NULL),
    3502  m_Count(0),
    3503  m_Capacity(0)
    3504  {
    3505  }
    3506 
    3507  VmaVector(size_t count, const AllocatorT& allocator) :
    3508  m_Allocator(allocator),
    3509  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3510  m_Count(count),
    3511  m_Capacity(count)
    3512  {
    3513  }
    3514 
    3515  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3516  m_Allocator(src.m_Allocator),
    3517  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3518  m_Count(src.m_Count),
    3519  m_Capacity(src.m_Count)
    3520  {
    3521  if(m_Count != 0)
    3522  {
    3523  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3524  }
    3525  }
    3526 
    3527  ~VmaVector()
    3528  {
    3529  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3530  }
    3531 
    3532  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3533  {
    3534  if(&rhs != this)
    3535  {
    3536  resize(rhs.m_Count);
    3537  if(m_Count != 0)
    3538  {
    3539  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3540  }
    3541  }
    3542  return *this;
    3543  }
    3544 
    3545  bool empty() const { return m_Count == 0; }
    3546  size_t size() const { return m_Count; }
    3547  T* data() { return m_pArray; }
    3548  const T* data() const { return m_pArray; }
    3549 
    3550  T& operator[](size_t index)
    3551  {
    3552  VMA_HEAVY_ASSERT(index < m_Count);
    3553  return m_pArray[index];
    3554  }
    3555  const T& operator[](size_t index) const
    3556  {
    3557  VMA_HEAVY_ASSERT(index < m_Count);
    3558  return m_pArray[index];
    3559  }
    3560 
    3561  T& front()
    3562  {
    3563  VMA_HEAVY_ASSERT(m_Count > 0);
    3564  return m_pArray[0];
    3565  }
    3566  const T& front() const
    3567  {
    3568  VMA_HEAVY_ASSERT(m_Count > 0);
    3569  return m_pArray[0];
    3570  }
    3571  T& back()
    3572  {
    3573  VMA_HEAVY_ASSERT(m_Count > 0);
    3574  return m_pArray[m_Count - 1];
    3575  }
    3576  const T& back() const
    3577  {
    3578  VMA_HEAVY_ASSERT(m_Count > 0);
    3579  return m_pArray[m_Count - 1];
    3580  }
    3581 
    3582  void reserve(size_t newCapacity, bool freeMemory = false)
    3583  {
    3584  newCapacity = VMA_MAX(newCapacity, m_Count);
    3585 
    3586  if((newCapacity < m_Capacity) && !freeMemory)
    3587  {
    3588  newCapacity = m_Capacity;
    3589  }
    3590 
    3591  if(newCapacity != m_Capacity)
    3592  {
    3593  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3594  if(m_Count != 0)
    3595  {
    3596  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3597  }
    3598  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3599  m_Capacity = newCapacity;
    3600  m_pArray = newArray;
    3601  }
    3602  }
    3603 
    3604  void resize(size_t newCount, bool freeMemory = false)
    3605  {
    3606  size_t newCapacity = m_Capacity;
    3607  if(newCount > m_Capacity)
    3608  {
    3609  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3610  }
    3611  else if(freeMemory)
    3612  {
    3613  newCapacity = newCount;
    3614  }
    3615 
    3616  if(newCapacity != m_Capacity)
    3617  {
    3618  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3619  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3620  if(elementsToCopy != 0)
    3621  {
    3622  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3623  }
    3624  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3625  m_Capacity = newCapacity;
    3626  m_pArray = newArray;
    3627  }
    3628 
    3629  m_Count = newCount;
    3630  }
    3631 
    // Removes all elements; releases the storage only when freeMemory is true.
    void clear(bool freeMemory = false)
    {
        resize(0, freeMemory);
    }
    3636 
    3637  void insert(size_t index, const T& src)
    3638  {
    3639  VMA_HEAVY_ASSERT(index <= m_Count);
    3640  const size_t oldCount = size();
    3641  resize(oldCount + 1);
    3642  if(index < oldCount)
    3643  {
    3644  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3645  }
    3646  m_pArray[index] = src;
    3647  }
    3648 
    3649  void remove(size_t index)
    3650  {
    3651  VMA_HEAVY_ASSERT(index < m_Count);
    3652  const size_t oldCount = size();
    3653  if(index < oldCount - 1)
    3654  {
    3655  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3656  }
    3657  resize(oldCount - 1);
    3658  }
    3659 
    3660  void push_back(const T& src)
    3661  {
    3662  const size_t newIndex = size();
    3663  resize(newIndex + 1);
    3664  m_pArray[newIndex] = src;
    3665  }
    3666 
    // Removes the last element (must be non-empty).
    void pop_back()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        resize(size() - 1);
    }

    // Inserts src at the beginning; O(n) due to the element shift.
    void push_front(const T& src)
    {
        insert(0, src);
    }

    // Removes the first element (must be non-empty); O(n) due to the shift.
    void pop_front()
    {
        VMA_HEAVY_ASSERT(m_Count > 0);
        remove(0);
    }
    3683 
    // Iterators are raw pointers into the contiguous storage; invalidated by
    // any operation that reallocates (reserve/resize/insert/push_back).
    typedef T* iterator;

    iterator begin() { return m_pArray; }
    iterator end() { return m_pArray + m_Count; }

private:
    AllocatorT m_Allocator;  // Allocator providing VkAllocationCallbacks.
    T* m_pArray;             // Contiguous element storage (may be null when empty).
    size_t m_Count;          // Number of live elements.
    size_t m_Capacity;       // Allocated element slots, >= m_Count.
};
    3695 
// Free-function wrappers so the same call sites work whether VmaVector or
// std::vector (VMA_USE_STL_VECTOR) is in use.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3707 
    3708 #endif // #if VMA_USE_STL_VECTOR
    3709 
    3710 template<typename CmpLess, typename VectorT>
    3711 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3712 {
    3713  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3714  vector.data(),
    3715  vector.data() + vector.size(),
    3716  value,
    3717  CmpLess()) - vector.data();
    3718  VmaVectorInsert(vector, indexToInsert, value);
    3719  return indexToInsert;
    3720 }
    3721 
    3722 template<typename CmpLess, typename VectorT>
    3723 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3724 {
    3725  CmpLess comparator;
    3726  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3727  vector.begin(),
    3728  vector.end(),
    3729  value,
    3730  comparator);
    3731  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3732  {
    3733  size_t indexToRemove = it - vector.begin();
    3734  VmaVectorRemove(vector, indexToRemove);
    3735  return true;
    3736  }
    3737  return false;
    3738 }
    3739 
    3740 template<typename CmpLess, typename IterT, typename KeyT>
    3741 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3742 {
    3743  CmpLess comparator;
    3744  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3745  beg, end, value, comparator);
    3746  if(it == end ||
    3747  (!comparator(*it, value) && !comparator(value, *it)))
    3748  {
    3749  return it;
    3750  }
    3751  return end;
    3752 }
    3753 
    3755 // class VmaPoolAllocator
    3756 
    3757 /*
    3758 Allocator for objects of type T using a list of arrays (pools) to speed up
    3759 allocation. Number of elements that can be allocated is not bounded because
    3760 allocator can create multiple blocks.
    3761 */
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    // pAllocationCallbacks: Vulkan-style allocation callbacks used for all
    // internal allocations. itemsPerBlock: capacity of each pooled array.
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    void Clear();        // Frees all blocks; outstanding Alloc() pointers become invalid.
    T* Alloc();          // Returns an item slot, creating a new block if all are full.
    void Free(T* ptr);   // Returns a slot previously obtained from Alloc().

private:
    // A slot is either a live T or a link in the block's free list.
    union Item
    {
        uint32_t NextFreeIndex;  // Index of next free slot, UINT32_MAX = end of list.
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;            // Array of m_ItemsPerBlock slots.
        uint32_t FirstFreeIndex; // Head of this block's free list, UINT32_MAX = full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3792 
// Stores the callbacks and block size; no blocks are allocated until the
// first Alloc() call.
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
    3801 
// Releases all pooled blocks.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3807 
    3808 template<typename T>
    3809 void VmaPoolAllocator<T>::Clear()
    3810 {
    3811  for(size_t i = m_ItemBlocks.size(); i--; )
    3812  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3813  m_ItemBlocks.clear();
    3814 }
    3815 
    3816 template<typename T>
    3817 T* VmaPoolAllocator<T>::Alloc()
    3818 {
    3819  for(size_t i = m_ItemBlocks.size(); i--; )
    3820  {
    3821  ItemBlock& block = m_ItemBlocks[i];
    3822  // This block has some free items: Use first one.
    3823  if(block.FirstFreeIndex != UINT32_MAX)
    3824  {
    3825  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3826  block.FirstFreeIndex = pItem->NextFreeIndex;
    3827  return &pItem->Value;
    3828  }
    3829  }
    3830 
    3831  // No block has free item: Create new one and use it.
    3832  ItemBlock& newBlock = CreateNewBlock();
    3833  Item* const pItem = &newBlock.pItems[0];
    3834  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3835  return &pItem->Value;
    3836 }
    3837 
    3838 template<typename T>
    3839 void VmaPoolAllocator<T>::Free(T* ptr)
    3840 {
    3841  // Search all memory blocks to find ptr.
    3842  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3843  {
    3844  ItemBlock& block = m_ItemBlocks[i];
    3845 
    3846  // Casting to union.
    3847  Item* pItemPtr;
    3848  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3849 
    3850  // Check if pItemPtr is in address range of this block.
    3851  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3852  {
    3853  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3854  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3855  block.FirstFreeIndex = index;
    3856  return;
    3857  }
    3858  }
    3859  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3860 }
    3861 
    3862 template<typename T>
    3863 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3864 {
    3865  ItemBlock newBlock = {
    3866  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3867 
    3868  m_ItemBlocks.push_back(newBlock);
    3869 
    3870  // Setup singly-linked list of all free items in this block.
    3871  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3872  newBlock.pItems[i].NextFreeIndex = i + 1;
    3873  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3874  return m_ItemBlocks.back();
    3875 }
    3876 
    3878 // class VmaRawList, VmaList
    3879 
    3880 #if VMA_USE_STL_LIST
    3881 
    3882 #define VmaList std::list
    3883 
    3884 #else // #if VMA_USE_STL_LIST
    3885 
// Node of the doubly linked VmaRawList.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;  // Previous node, or null at the front.
    VmaListItem* pNext;  // Next node, or null at the back.
    T Value;
};
    3893 
    3894 // Doubly linked list.
// Doubly linked list.
// Nodes are pooled in a VmaPoolAllocator; raw ItemType pointers are exposed
// so callers can splice/remove in O(1). The typed wrapper is VmaList below.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();  // Frees all nodes back to the pool.

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Push variants without a value leave Value default-initialized for the
    // caller to fill in.
    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator;  // Node storage pool.
    ItemType* m_pFront;  // Null when empty.
    ItemType* m_pBack;   // Null when empty.
    size_t m_Count;
};
    3938 
// Constructs an empty list; node pool allocates 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3948 
// The pool allocator's own destructor frees all node storage wholesale.
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
    3955 
    3956 template<typename T>
    3957 void VmaRawList<T>::Clear()
    3958 {
    3959  if(IsEmpty() == false)
    3960  {
    3961  ItemType* pItem = m_pBack;
    3962  while(pItem != VMA_NULL)
    3963  {
    3964  ItemType* const pPrevItem = pItem->pPrev;
    3965  m_ItemAllocator.Free(pItem);
    3966  pItem = pPrevItem;
    3967  }
    3968  m_pFront = VMA_NULL;
    3969  m_pBack = VMA_NULL;
    3970  m_Count = 0;
    3971  }
    3972 }
    3973 
    3974 template<typename T>
    3975 VmaListItem<T>* VmaRawList<T>::PushBack()
    3976 {
    3977  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3978  pNewItem->pNext = VMA_NULL;
    3979  if(IsEmpty())
    3980  {
    3981  pNewItem->pPrev = VMA_NULL;
    3982  m_pFront = pNewItem;
    3983  m_pBack = pNewItem;
    3984  m_Count = 1;
    3985  }
    3986  else
    3987  {
    3988  pNewItem->pPrev = m_pBack;
    3989  m_pBack->pNext = pNewItem;
    3990  m_pBack = pNewItem;
    3991  ++m_Count;
    3992  }
    3993  return pNewItem;
    3994 }
    3995 
    3996 template<typename T>
    3997 VmaListItem<T>* VmaRawList<T>::PushFront()
    3998 {
    3999  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4000  pNewItem->pPrev = VMA_NULL;
    4001  if(IsEmpty())
    4002  {
    4003  pNewItem->pNext = VMA_NULL;
    4004  m_pFront = pNewItem;
    4005  m_pBack = pNewItem;
    4006  m_Count = 1;
    4007  }
    4008  else
    4009  {
    4010  pNewItem->pNext = m_pFront;
    4011  m_pFront->pPrev = pNewItem;
    4012  m_pFront = pNewItem;
    4013  ++m_Count;
    4014  }
    4015  return pNewItem;
    4016 }
    4017 
    4018 template<typename T>
    4019 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4020 {
    4021  ItemType* const pNewItem = PushBack();
    4022  pNewItem->Value = value;
    4023  return pNewItem;
    4024 }
    4025 
    4026 template<typename T>
    4027 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4028 {
    4029  ItemType* const pNewItem = PushFront();
    4030  pNewItem->Value = value;
    4031  return pNewItem;
    4032 }
    4033 
    4034 template<typename T>
    4035 void VmaRawList<T>::PopBack()
    4036 {
    4037  VMA_HEAVY_ASSERT(m_Count > 0);
    4038  ItemType* const pBackItem = m_pBack;
    4039  ItemType* const pPrevItem = pBackItem->pPrev;
    4040  if(pPrevItem != VMA_NULL)
    4041  {
    4042  pPrevItem->pNext = VMA_NULL;
    4043  }
    4044  m_pBack = pPrevItem;
    4045  m_ItemAllocator.Free(pBackItem);
    4046  --m_Count;
    4047 }
    4048 
    4049 template<typename T>
    4050 void VmaRawList<T>::PopFront()
    4051 {
    4052  VMA_HEAVY_ASSERT(m_Count > 0);
    4053  ItemType* const pFrontItem = m_pFront;
    4054  ItemType* const pNextItem = pFrontItem->pNext;
    4055  if(pNextItem != VMA_NULL)
    4056  {
    4057  pNextItem->pPrev = VMA_NULL;
    4058  }
    4059  m_pFront = pNextItem;
    4060  m_ItemAllocator.Free(pFrontItem);
    4061  --m_Count;
    4062 }
    4063 
    4064 template<typename T>
    4065 void VmaRawList<T>::Remove(ItemType* pItem)
    4066 {
    4067  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4068  VMA_HEAVY_ASSERT(m_Count > 0);
    4069 
    4070  if(pItem->pPrev != VMA_NULL)
    4071  {
    4072  pItem->pPrev->pNext = pItem->pNext;
    4073  }
    4074  else
    4075  {
    4076  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4077  m_pFront = pItem->pNext;
    4078  }
    4079 
    4080  if(pItem->pNext != VMA_NULL)
    4081  {
    4082  pItem->pNext->pPrev = pItem->pPrev;
    4083  }
    4084  else
    4085  {
    4086  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4087  m_pBack = pItem->pPrev;
    4088  }
    4089 
    4090  m_ItemAllocator.Free(pItem);
    4091  --m_Count;
    4092 }
    4093 
// Inserts a new node (Value default-initialized) immediately before pItem.
// A null pItem means "before end()", i.e. PushBack.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const prevItem = pItem->pPrev;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pPrev = prevItem;
        newItem->pNext = pItem;
        pItem->pPrev = newItem;
        if(prevItem != VMA_NULL)
        {
            prevItem->pNext = newItem;
        }
        else
        {
            // pItem was the front; the new node takes its place.
            VMA_HEAVY_ASSERT(m_pFront == pItem);
            m_pFront = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushBack();
}
    4119 
// Inserts a new node (Value default-initialized) immediately after pItem.
// A null pItem means PushFront.
template<typename T>
VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
{
    if(pItem != VMA_NULL)
    {
        ItemType* const nextItem = pItem->pNext;
        ItemType* const newItem = m_ItemAllocator.Alloc();
        newItem->pNext = nextItem;
        newItem->pPrev = pItem;
        pItem->pNext = newItem;
        if(nextItem != VMA_NULL)
        {
            nextItem->pPrev = newItem;
        }
        else
        {
            // pItem was the back; the new node takes its place.
            VMA_HEAVY_ASSERT(m_pBack == pItem);
            m_pBack = newItem;
        }
        ++m_Count;
        return newItem;
    }
    else
        return PushFront();
}
    4145 
    4146 template<typename T>
    4147 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4148 {
    4149  ItemType* const newItem = InsertBefore(pItem);
    4150  newItem->Value = value;
    4151  return newItem;
    4152 }
    4153 
    4154 template<typename T>
    4155 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4156 {
    4157  ItemType* const newItem = InsertAfter(pItem);
    4158  newItem->Value = value;
    4159  return newItem;
    4160 }
    4161 
// std::list-like typed wrapper over VmaRawList, providing bidirectional
// iterators. end() is represented by a null m_pItem; operator-- on end()
// therefore steps to Back(). Iterator code is left byte-identical — only
// documentation added.
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing end(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;  // Null represents end().

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        // Implicit conversion from the mutable iterator.
        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                // Decrementing cend(): move to the last element.
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;  // Null represents cend().

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts before it; insert(end(), v) appends.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4346 
    4347 #endif // #if VMA_USE_STL_LIST
    4348 
    4350 // class VmaMap
    4351 
    4352 // Unused in this version.
    4353 #if 0
    4354 
    4355 #if VMA_USE_STL_UNORDERED_MAP
    4356 
    4357 #define VmaPair std::pair
    4358 
    4359 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4360  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4361 
    4362 #else // #if VMA_USE_STL_UNORDERED_MAP
    4363 
// Minimal std::pair substitute (currently inside a disabled #if 0 region).
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4373 
    4374 /* Class compatible with subset of interface of std::unordered_map.
    4375 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4376 */
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a sorted VmaVector with binary search, not a hash table.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;  // Raw pointer into the sorted vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);    // Keeps the vector sorted by key.
    iterator find(const KeyT& key);       // Returns end() if absent.
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4396 
    4397 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4398 
// Orders VmaPairs by their first member; the second overload allows
// comparing a pair directly against a bare key during binary search.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4411 
    4412 template<typename KeyT, typename ValueT>
    4413 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4414 {
    4415  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4416  m_Vector.data(),
    4417  m_Vector.data() + m_Vector.size(),
    4418  pair,
    4419  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4420  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4421 }
    4422 
    4423 template<typename KeyT, typename ValueT>
    4424 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4425 {
    4426  PairType* it = VmaBinaryFindFirstNotLess(
    4427  m_Vector.data(),
    4428  m_Vector.data() + m_Vector.size(),
    4429  key,
    4430  VmaPairFirstLess<KeyT, ValueT>());
    4431  if((it != m_Vector.end()) && (it->first == key))
    4432  {
    4433  return it;
    4434  }
    4435  else
    4436  {
    4437  return m_Vector.end();
    4438  }
    4439 }
    4440 
// Erases the element at it (must be a valid iterator from find/begin).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4446 
    4447 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4448 
    4449 #endif // #if 0
    4450 
    4452 
class VmaDeviceMemoryBlock;

// Direction of a host-cache maintenance operation on mapped memory.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4456 
    4457 struct VmaAllocation_T
    4458 {
    4459  VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // High bit of m_MapCount: allocation was created persistently mapped.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // m_pUserData points to a heap-owned string rather than an opaque pointer.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,       // Not yet initialized by an Init* method.
        ALLOCATION_TYPE_BLOCK,      // Suballocated from a VmaDeviceMemoryBlock.
        ALLOCATION_TYPE_DEDICATED,  // Owns its own VkDeviceMemory.
    };
    4475 
    // Creates an uninitialized (ALLOCATION_TYPE_NONE) allocation; one of the
    // Init* methods must be called before use. userDataString selects string
    // semantics for SetUserData.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }
    4491 
    // Destructor only validates invariants: all user map references released
    // (the persistent-map flag bit is allowed) and user data already freed.
    ~VmaAllocation_T()
    {
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }
    4499 
    // Initializes this allocation as a suballocation of `block` within pool
    // `hPool` (null for a default-pool block). Must be called exactly once,
    // on a NONE-type allocation.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        // Persistent-map bit set up front; per-call map refcount starts at 0.
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }
    4522 
    // Initializes this allocation directly in the "lost" state: a block-type
    // allocation with no backing block. Requires m_LastUseFrameIndex to have
    // been constructed as VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }
    4533 
    // Re-points a block allocation at a different block/offset (used by
    // defragmentation). Defined out of line.
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    // Updates m_Size in place. Defined out of line.
    void ChangeSize(VkDeviceSize newSize);
    // pMappedData not null means allocation is created with MAPPED flag.
    // Initializes this allocation as owning its own VkDeviceMemory. Must be
    // called exactly once, on a NONE-type allocation.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        // Alignment is irrelevant for a whole VkDeviceMemory object.
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }
    4560 
    // Simple observers over the packed state fields.
    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;
    4581 
    // Atomic accessors for the lost-allocation frame tracking.
    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // compare_exchange_weak: callers must be prepared to retry on spurious failure.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    4599 
    // Fills outInfo with statistics describing this single dedicated
    // allocation: one block, one allocation, zero unused ranges. The
    // min/max sentinel values (UINT64_MAX / 0) make the entry merge
    // correctly into aggregate statistics.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }
    4612 
    4613  void BlockAllocMap();
    4614  void BlockAllocUnmap();
    4615  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    4616  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    4617 
    4618 #if VMA_STATS_STRING_ENABLED
    4619  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    4620  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    4621 
    // Records the buffer/image usage flags for diagnostic output
    // (later retrieved via GetBufferImageUsage; 0 means unknown).
    // May be set only once - asserts if a nonzero value was already stored.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }
    4627 
    4628  void PrintParameters(class VmaJsonWriter& json) const;
    4629 #endif
    4630 
    4631 private:
    4632  VkDeviceSize m_Alignment;
    4633  VkDeviceSize m_Size;
    4634  void* m_pUserData;
    4635  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    4636  uint8_t m_Type; // ALLOCATION_TYPE
    4637  uint8_t m_SuballocationType; // VmaSuballocationType
    4638  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    4639  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    4640  uint8_t m_MapCount;
    4641  uint8_t m_Flags; // enum FLAGS
    4642 
    4643  // Allocation out of VmaDeviceMemoryBlock.
    4644  struct BlockAllocation
    4645  {
    4646  VmaPool m_hPool; // Null if belongs to general memory.
    4647  VmaDeviceMemoryBlock* m_Block;
    4648  VkDeviceSize m_Offset;
    4649  bool m_CanBecomeLost;
    4650  };
    4651 
    4652  // Allocation for an object that has its own private VkDeviceMemory.
    4653  struct DedicatedAllocation
    4654  {
    4655  uint32_t m_MemoryTypeIndex;
    4656  VkDeviceMemory m_hMemory;
    4657  void* m_pMappedData; // Not null means memory is mapped.
    4658  };
    4659 
    4660  union
    4661  {
    4662  // Allocation out of VmaDeviceMemoryBlock.
    4663  BlockAllocation m_BlockAllocation;
    4664  // Allocation for an object that has its own private VkDeviceMemory.
    4665  DedicatedAllocation m_DedicatedAllocation;
    4666  };
    4667 
    4668 #if VMA_STATS_STRING_ENABLED
    4669  uint32_t m_CreationFrameIndex;
    4670  uint32_t m_BufferImageUsage; // 0 if unknown.
    4671 #endif
    4672 
    4673  void FreeUserDataString(VmaAllocator hAllocator);
    4674 };
    4675 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;       // Offset of the region from the start of the block, in bytes.
    VkDeviceSize size;         // Size of the region, in bytes.
    VmaAllocation hAllocation; // Owning allocation, or null when the region is free.
    VmaSuballocationType type; // FREE, or the kind of resource placed in this region.
};
    4687 
    4688 // Comparator for offsets.
    4689 struct VmaSuballocationOffsetLess
    4690 {
    4691  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4692  {
    4693  return lhs.offset < rhs.offset;
    4694  }
    4695 };
    4696 struct VmaSuballocationOffsetGreater
    4697 {
    4698  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4699  {
    4700  return lhs.offset > rhs.offset;
    4701  }
    4702 };
    4703 
// List of all suballocations (used and free) covering a whole block, kept in offset order.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes (1 MiB).
// Added per lost allocation when comparing candidate allocation requests - see
// VmaAllocationRequest::CalcCost().
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4708 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;        // Offset inside the block where the allocation would be placed.
    VkDeviceSize sumFreeSize;   // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize;   // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;  // See struct-level comment above.
    size_t itemsToMakeLostCount;          // See struct-level comment above.
    void* customData;           // Payload private to the metadata implementation that produced this request.

    // Heuristic cost of fulfilling this request: bytes of live allocations sacrificed,
    // plus VMA_LOST_ALLOCATION_COST per allocation that would be made lost.
    // Lower is better when choosing among candidate requests.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4736 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class - concrete allocation algorithms are implemented by
VmaBlockMetadata_Generic, VmaBlockMetadata_Linear and VmaBlockMetadata_Buddy.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Always call after construction, with the size of the managed block.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations listed in the request (when it was created with
    // canMakeOtherLost). Returns false if that is no longer possible.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost every allocation in this block that can be. Returns the number made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // Scans the mapped block data for corruption markers. pBlockData points at the
    // beginning of the mapped block memory.
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation: not supported by this algorithm.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Shared helpers for emitting the JSON detailed map in derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4825 
// Helper for Validate() implementations: if `cond` does not hold, asserts
// (in debug builds) and makes the enclosing function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4830 
/*
Default block metadata implementation: keeps every suballocation (used and free)
in an offset-ordered list, plus a size-sorted vector of iterators to the larger
free suballocations for fast best-fit / worst-fit / first-fit search.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Every list item that is not free is an allocation.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // In-place resize is supported by this algorithm (overrides the default "false").
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    uint32_t m_FreeCount;          // Number of FREE items in m_Suballocations.
    VkDeviceSize m_SumFreeSize;    // Total bytes of all FREE items.
    VmaSuballocationList m_Suballocations; // All regions of the block, ordered by offset.
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4923 
    4924 /*
    4925 Allocations and their references in internal data structure look like this:
    4926 
    4927 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4928 
    4929  0 +-------+
    4930  | |
    4931  | |
    4932  | |
    4933  +-------+
    4934  | Alloc | 1st[m_1stNullItemsBeginCount]
    4935  +-------+
    4936  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4937  +-------+
    4938  | ... |
    4939  +-------+
    4940  | Alloc | 1st[1st.size() - 1]
    4941  +-------+
    4942  | |
    4943  | |
    4944  | |
    4945 GetSize() +-------+
    4946 
    4947 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4948 
    4949  0 +-------+
    4950  | Alloc | 2nd[0]
    4951  +-------+
    4952  | Alloc | 2nd[1]
    4953  +-------+
    4954  | ... |
    4955  +-------+
    4956  | Alloc | 2nd[2nd.size() - 1]
    4957  +-------+
    4958  | |
    4959  | |
    4960  | |
    4961  +-------+
    4962  | Alloc | 1st[m_1stNullItemsBeginCount]
    4963  +-------+
    4964  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4965  +-------+
    4966  | ... |
    4967  +-------+
    4968  | Alloc | 1st[1st.size() - 1]
    4969  +-------+
    4970  | |
    4971 GetSize() +-------+
    4972 
    4973 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4974 
    4975  0 +-------+
    4976  | |
    4977  | |
    4978  | |
    4979  +-------+
    4980  | Alloc | 1st[m_1stNullItemsBeginCount]
    4981  +-------+
    4982  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4983  +-------+
    4984  | ... |
    4985  +-------+
    4986  | Alloc | 1st[1st.size() - 1]
    4987  +-------+
    4988  | |
    4989  | |
    4990  | |
    4991  +-------+
    4992  | Alloc | 2nd[2nd.size() - 1]
    4993  +-------+
    4994  | ... |
    4995  +-------+
    4996  | Alloc | 2nd[1]
    4997  +-------+
    4998  | Alloc | 2nd[0]
    4999 GetSize() +-------+
    5000 
    5001 */
/*
Metadata implementation for the linear allocation algorithm (ring buffer /
stack / double stack). See the diagram comment above for the layout of the
1st and 2nd suballocation vectors in each mode.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex; // 0 or 1: which of the two vectors is currently "1st".
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
    5100 
    5101 /*
    5102 - GetSize() is the original size of allocated memory block.
    5103 - m_UsableSize is this size aligned down to a power of two.
    5104  All allocations and calculations happen relative to m_UsableSize.
    5105 - GetUnusableSize() is the difference between them.
 It is reported as a separate, unused range, not available for allocations.
    5107 
    5108 Node at level 0 has size = m_UsableSize.
    5109 Each next level contains nodes with size 2 times smaller than current level.
    5110 m_LevelCount is the maximum number of levels to use in the current object.
    5111 */
// Metadata implementation for the buddy allocation algorithm: a binary tree of
// power-of-two nodes over m_UsableSize (see the comment block above this class).
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // The unusable tail (size not covered by the power-of-two m_UsableSize) is counted as free.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty when the root node is one big free region.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32; // Smallest node the tree will split down to, in bytes.
    static const size_t MAX_LEVELS = 30;          // Upper bound on tree depth.

    // Accumulators filled by ValidateNode() and checked against the cached counters.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree. Payload depends on `type` (see the union).
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy; // The sibling node covering the other half of the parent.

        union
        {
            // TYPE_FREE: links in the free list of this node's level.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // TYPE_ALLOCATION: the allocation occupying this node.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // TYPE_SPLIT: left child (right child is leftChild->buddy).
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level doubly-linked list of free nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Nodes at level 0 span m_UsableSize; each deeper level halves the size.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5248 
/*
Represents a single block of device memory (`VkDeviceMemory`) with all the
data about its regions (aka suballocations, #VmaAllocation), assigned and free.

Thread-safety: This class must be externally synchronized.
*/
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of this block's regions; concrete type depends on the chosen algorithm.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Maps the block's memory `count` times (reference-counted). ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/verify corruption-detection markers around the given allocation's range.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;    // Reference count of outstanding Map() calls.
    void* m_pMappedData;    // Non-null while the block is mapped.
};
    5317 
    5318 struct VmaPointerLess
    5319 {
    5320  bool operator()(const void* lhs, const void* rhs) const
    5321  {
    5322  return lhs < rhs;
    5323  }
    5324 };
    5325 
    5326 class VmaDefragmentator;
    5327 
/*
Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
Vulkan memory type.

Synchronized internally with a mutex.
*/
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Preallocates m_MinBlockCount blocks. Always call after construction.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates from an existing block or creates a new one, honoring createInfo flags.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates (or reuses) the defragmentator for this vector.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one block that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    VmaDefragmentator* m_pDefragmentator; // Null until EnsureDefragmentator() is called.
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5446 
// Represents a single custom memory pool. Thin wrapper around a
// VmaBlockVector that owns all VkDeviceMemory blocks of the pool.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    // The blocks of device memory belonging to this pool.
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id can be assigned only once - 0 means "not assigned yet".
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    // Unique, nonzero identifier of this pool, assigned once via SetId().
    uint32_t m_Id;
};
    5469 
// Performs defragmentation of allocations within a single VmaBlockVector:
// moves registered (movable) allocations between blocks to reduce
// fragmentation. Created per block vector (see EnsureDefragmentator above).
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    // The block vector whose allocations are being defragmented.
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Totals accumulated across defragmentation rounds.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // One allocation registered for defragmentation, plus an optional output
    // flag that is written when the allocation's placement changes.
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Orders allocations by size, largest first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block bookkeeping used during a defragmentation pass.
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        // True if the block contains allocations that were NOT registered for
        // defragmentation and therefore must not be moved.
        bool m_HasNonMovableAllocations;
        // Registered allocations that currently live in this block.
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // The block has non-movable allocations exactly when it holds more
        // allocations than were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): name contains a typo ("Descecnding"); kept as-is to
        // preserve the interface for existing callers.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        // Maps the block's memory if needed; *ppMappedData receives the pointer.
        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Orders BlockInfo entries by the address of the underlying block, so a
    // sorted vector can be binary-searched by block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs a single pass of moving allocations, limited by the budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    // Heuristic deciding whether moving from src to dst placement is useful.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    // Accumulated statistics of the moves performed so far.
    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate to be moved.
    // pChanged is optional output - see AllocationInfo::m_pChanged.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5599 
    5600 #if VMA_RECORDING_ENABLED
    5601 
// Records a trace of VMA calls (allocator, pool, allocation, buffer/image
// lifecycle) to a file for later analysis/replay.
// Available only when compiled with VMA_RECORDING_ENABLED.
class VmaRecorder
{
public:
    VmaRecorder();
    // Opens the output file according to settings. useMutex enables locking
    // around writes so recording works with a multithreaded allocator.
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device and enabled extensions.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per traced VMA entry point; each takes the current
    // frame index plus the parameters relevant to that call.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Thread id and timestamp captured for each recorded call.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats an allocation's pUserData for output: either as text or as a
    // pointer rendered into m_PtrStr, depending on allocFlags.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        // Buffer for the pointer formatted as text (16 hex chars + null).
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    // Guards m_File when m_UseMutex is true.
    VMA_MUTEX m_FileMutex;
    // High-resolution timer frequency and start value used to compute
    // CallParams::time.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5701 
    5702 #endif // #if VMA_RECORDING_ENABLED
    5703 
// Main allocator object. Holds the default block vectors (one per memory
// type), registered dedicated allocations, custom pools, device properties
// and the imported Vulkan function pointers.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools - one block vector per memory type.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    // Dedicated allocations registered per memory type, each list guarded by
    // its own mutex.
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided callbacks, or null meaning "use Vulkan defaults".
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device bufferImageGranularity, clamped up by the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent memory must additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Allocation/freeing of raw VkDeviceMemory.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Fills the allocation's memory with the given 8-bit pattern.
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5905 
    5907 // Memory allocation #2 after VmaAllocator_T definition
    5908 
    5909 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5910 {
    5911  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5912 }
    5913 
    5914 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5915 {
    5916  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5917 }
    5918 
    5919 template<typename T>
    5920 static T* VmaAllocate(VmaAllocator hAllocator)
    5921 {
    5922  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5923 }
    5924 
    5925 template<typename T>
    5926 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5927 {
    5928  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5929 }
    5930 
    5931 template<typename T>
    5932 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5933 {
    5934  if(ptr != VMA_NULL)
    5935  {
    5936  ptr->~T();
    5937  VmaFree(hAllocator, ptr);
    5938  }
    5939 }
    5940 
    5941 template<typename T>
    5942 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5943 {
    5944  if(ptr != VMA_NULL)
    5945  {
    5946  for(size_t i = count; i--; )
    5947  ptr[i].~T();
    5948  VmaFree(hAllocator, ptr);
    5949  }
    5950 }
    5951 
    5953 // VmaStringBuilder
    5954 
    5955 #if VMA_STATS_STRING_ENABLED
    5956 
// Helper class for building a text buffer incrementally, using VmaVector
// with the allocator's callbacks instead of std::string.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // Returned buffer is NOT null-terminated - use together with GetLength().
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    // Appends the pointer value formatted as text.
    void AddPointer(const void* ptr);

private:
    // Raw character storage; no null terminator is kept.
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5974 
    5975 void VmaStringBuilder::Add(const char* pStr)
    5976 {
    5977  const size_t strLen = strlen(pStr);
    5978  if(strLen > 0)
    5979  {
    5980  const size_t oldCount = m_Data.size();
    5981  m_Data.resize(oldCount + strLen);
    5982  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5983  }
    5984 }
    5985 
    5986 void VmaStringBuilder::AddNumber(uint32_t num)
    5987 {
    5988  char buf[11];
    5989  VmaUint32ToStr(buf, sizeof(buf), num);
    5990  Add(buf);
    5991 }
    5992 
    5993 void VmaStringBuilder::AddNumber(uint64_t num)
    5994 {
    5995  char buf[21];
    5996  VmaUint64ToStr(buf, sizeof(buf), num);
    5997  Add(buf);
    5998 }
    5999 
    6000 void VmaStringBuilder::AddPointer(const void* ptr)
    6001 {
    6002  char buf[21];
    6003  VmaPtrToStr(buf, sizeof(buf), ptr);
    6004  Add(buf);
    6005 }
    6006 
    6007 #endif // #if VMA_STATS_STRING_ENABLED
    6008 
    6010 // VmaJsonWriter
    6011 
    6012 #if VMA_STATS_STRING_ENABLED
    6013 
// Emits well-formed JSON text into a VmaStringBuilder. Maintains a stack of
// currently open objects/arrays so that commas, colons and indentation are
// inserted automatically.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine == true suppresses newlines/indentation inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value (or object key) in one call.
    void WriteString(const char* pStr);
    // Alternatively, a string can be built in pieces:
    // BeginString, ContinueString..., EndString.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // One indentation unit, repeated once per nesting level.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // State of one currently open object or array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of keys+values (object) or elements (array) written so far.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of open collections, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString and EndString.
    bool m_InsideString;

    // Common bookkeeping before writing any value: separators, indentation,
    // and (inside objects) the key/value alternation check.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6062 
// Single indentation unit used by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
    6064 
// Constructs a writer that appends JSON text to the given string builder.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6071 
VmaJsonWriter::~VmaJsonWriter()
{
    // At destruction every string and every open collection must be closed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6077 
    6078 void VmaJsonWriter::BeginObject(bool singleLine)
    6079 {
    6080  VMA_ASSERT(!m_InsideString);
    6081 
    6082  BeginValue(false);
    6083  m_SB.Add('{');
    6084 
    6085  StackItem item;
    6086  item.type = COLLECTION_TYPE_OBJECT;
    6087  item.valueCount = 0;
    6088  item.singleLineMode = singleLine;
    6089  m_Stack.push_back(item);
    6090 }
    6091 
    6092 void VmaJsonWriter::EndObject()
    6093 {
    6094  VMA_ASSERT(!m_InsideString);
    6095 
    6096  WriteIndent(true);
    6097  m_SB.Add('}');
    6098 
    6099  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6100  m_Stack.pop_back();
    6101 }
    6102 
    6103 void VmaJsonWriter::BeginArray(bool singleLine)
    6104 {
    6105  VMA_ASSERT(!m_InsideString);
    6106 
    6107  BeginValue(false);
    6108  m_SB.Add('[');
    6109 
    6110  StackItem item;
    6111  item.type = COLLECTION_TYPE_ARRAY;
    6112  item.valueCount = 0;
    6113  item.singleLineMode = singleLine;
    6114  m_Stack.push_back(item);
    6115 }
    6116 
    6117 void VmaJsonWriter::EndArray()
    6118 {
    6119  VMA_ASSERT(!m_InsideString);
    6120 
    6121  WriteIndent(true);
    6122  m_SB.Add(']');
    6123 
    6124  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6125  m_Stack.pop_back();
    6126 }
    6127 
// Writes a complete JSON string value (or object key) in one call.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
    6133 
    6134 void VmaJsonWriter::BeginString(const char* pStr)
    6135 {
    6136  VMA_ASSERT(!m_InsideString);
    6137 
    6138  BeginValue(true);
    6139  m_SB.Add('"');
    6140  m_InsideString = true;
    6141  if(pStr != VMA_NULL && pStr[0] != '\0')
    6142  {
    6143  ContinueString(pStr);
    6144  }
    6145 }
    6146 
    6147 void VmaJsonWriter::ContinueString(const char* pStr)
    6148 {
    6149  VMA_ASSERT(m_InsideString);
    6150 
    6151  const size_t strLen = strlen(pStr);
    6152  for(size_t i = 0; i < strLen; ++i)
    6153  {
    6154  char ch = pStr[i];
    6155  if(ch == '\\')
    6156  {
    6157  m_SB.Add("\\\\");
    6158  }
    6159  else if(ch == '"')
    6160  {
    6161  m_SB.Add("\\\"");
    6162  }
    6163  else if(ch >= 32)
    6164  {
    6165  m_SB.Add(ch);
    6166  }
    6167  else switch(ch)
    6168  {
    6169  case '\b':
    6170  m_SB.Add("\\b");
    6171  break;
    6172  case '\f':
    6173  m_SB.Add("\\f");
    6174  break;
    6175  case '\n':
    6176  m_SB.Add("\\n");
    6177  break;
    6178  case '\r':
    6179  m_SB.Add("\\r");
    6180  break;
    6181  case '\t':
    6182  m_SB.Add("\\t");
    6183  break;
    6184  default:
    6185  VMA_ASSERT(0 && "Character not currently supported.");
    6186  break;
    6187  }
    6188  }
    6189 }
    6190 
// Appends a number to the string currently being built.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a pointer value to the string currently being built.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6208 
    6209 void VmaJsonWriter::EndString(const char* pStr)
    6210 {
    6211  VMA_ASSERT(m_InsideString);
    6212  if(pStr != VMA_NULL && pStr[0] != '\0')
    6213  {
    6214  ContinueString(pStr);
    6215  }
    6216  m_SB.Add('"');
    6217  m_InsideString = false;
    6218 }
    6219 
// Writes a numeric JSON value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes a JSON boolean value ("true"/"false").
void VmaJsonWriter::WriteBool(bool b)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add(b ? "true" : "false");
}

// Writes the JSON "null" literal.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6247 
    6248 void VmaJsonWriter::BeginValue(bool isString)
    6249 {
    6250  if(!m_Stack.empty())
    6251  {
    6252  StackItem& currItem = m_Stack.back();
    6253  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6254  currItem.valueCount % 2 == 0)
    6255  {
    6256  VMA_ASSERT(isString);
    6257  }
    6258 
    6259  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6260  currItem.valueCount % 2 != 0)
    6261  {
    6262  m_SB.Add(": ");
    6263  }
    6264  else if(currItem.valueCount > 0)
    6265  {
    6266  m_SB.Add(", ");
    6267  WriteIndent();
    6268  }
    6269  else
    6270  {
    6271  WriteIndent();
    6272  }
    6273  ++currItem.valueCount;
    6274  }
    6275 }
    6276 
    6277 void VmaJsonWriter::WriteIndent(bool oneLess)
    6278 {
    6279  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6280  {
    6281  m_SB.AddNewLine();
    6282 
    6283  size_t count = m_Stack.size();
    6284  if(count > 0 && oneLess)
    6285  {
    6286  --count;
    6287  }
    6288  for(size_t i = 0; i < count; ++i)
    6289  {
    6290  m_SB.Add(INDENT);
    6291  }
    6292  }
    6293 }
    6294 
    6295 #endif // #if VMA_STATS_STRING_ENABLED
    6296 
    6298 
    6299 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6300 {
    6301  if(IsUserDataString())
    6302  {
    6303  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6304 
    6305  FreeUserDataString(hAllocator);
    6306 
    6307  if(pUserData != VMA_NULL)
    6308  {
    6309  const char* const newStrSrc = (char*)pUserData;
    6310  const size_t newStrLen = strlen(newStrSrc);
    6311  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6312  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6313  m_pUserData = newStrDst;
    6314  }
    6315  }
    6316  else
    6317  {
    6318  m_pUserData = pUserData;
    6319  }
    6320 }
    6321 
    6322 void VmaAllocation_T::ChangeBlockAllocation(
    6323  VmaAllocator hAllocator,
    6324  VmaDeviceMemoryBlock* block,
    6325  VkDeviceSize offset)
    6326 {
    6327  VMA_ASSERT(block != VMA_NULL);
    6328  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6329 
    6330  // Move mapping reference counter from old block to new block.
    6331  if(block != m_BlockAllocation.m_Block)
    6332  {
    6333  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    6334  if(IsPersistentMap())
    6335  ++mapRefCount;
    6336  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    6337  block->Map(hAllocator, mapRefCount, VMA_NULL);
    6338  }
    6339 
    6340  m_BlockAllocation.m_Block = block;
    6341  m_BlockAllocation.m_Offset = offset;
    6342 }
    6343 
// Updates the cached size of this allocation (used after a resize).
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    6349 
    6350 VkDeviceSize VmaAllocation_T::GetOffset() const
    6351 {
    6352  switch(m_Type)
    6353  {
    6354  case ALLOCATION_TYPE_BLOCK:
    6355  return m_BlockAllocation.m_Offset;
    6356  case ALLOCATION_TYPE_DEDICATED:
    6357  return 0;
    6358  default:
    6359  VMA_ASSERT(0);
    6360  return 0;
    6361  }
    6362 }
    6363 
    6364 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6365 {
    6366  switch(m_Type)
    6367  {
    6368  case ALLOCATION_TYPE_BLOCK:
    6369  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6370  case ALLOCATION_TYPE_DEDICATED:
    6371  return m_DedicatedAllocation.m_hMemory;
    6372  default:
    6373  VMA_ASSERT(0);
    6374  return VK_NULL_HANDLE;
    6375  }
    6376 }
    6377 
    6378 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6379 {
    6380  switch(m_Type)
    6381  {
    6382  case ALLOCATION_TYPE_BLOCK:
    6383  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6384  case ALLOCATION_TYPE_DEDICATED:
    6385  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6386  default:
    6387  VMA_ASSERT(0);
    6388  return UINT32_MAX;
    6389  }
    6390 }
    6391 
    6392 void* VmaAllocation_T::GetMappedData() const
    6393 {
    6394  switch(m_Type)
    6395  {
    6396  case ALLOCATION_TYPE_BLOCK:
    6397  if(m_MapCount != 0)
    6398  {
    6399  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6400  VMA_ASSERT(pBlockData != VMA_NULL);
    6401  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6402  }
    6403  else
    6404  {
    6405  return VMA_NULL;
    6406  }
    6407  break;
    6408  case ALLOCATION_TYPE_DEDICATED:
    6409  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6410  return m_DedicatedAllocation.m_pMappedData;
    6411  default:
    6412  VMA_ASSERT(0);
    6413  return VMA_NULL;
    6414  }
    6415 }
    6416 
    6417 bool VmaAllocation_T::CanBecomeLost() const
    6418 {
    6419  switch(m_Type)
    6420  {
    6421  case ALLOCATION_TYPE_BLOCK:
    6422  return m_BlockAllocation.m_CanBecomeLost;
    6423  case ALLOCATION_TYPE_DEDICATED:
    6424  return false;
    6425  default:
    6426  VMA_ASSERT(0);
    6427  return false;
    6428  }
    6429 }
    6430 
// Returns the custom pool this allocation belongs to.
// Valid only for block allocations; dedicated allocations have no pool.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6436 
// Atomically tries to mark this allocation as lost so its memory can be
// reclaimed. Returns true on success; false if the allocation is already
// lost or was used too recently (within frameInUseCount frames of
// currentFrameIndex).
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    // Compare-exchange retry loop on the atomic last-use frame index.
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - callers are expected not to ask again.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Used too recently - may still be in flight, cannot become lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    6468 
    6469 #if VMA_STATS_STRING_ENABLED
    6470 
// Correspond to values of enum VmaSuballocationType.
// NOTE: the order and count of entries must stay in sync with that enum -
// PrintParameters() indexes this array directly with m_SuballocationType.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6480 
// Serializes this allocation's parameters as members of the JSON object the
// caller has already begun. Emission order is part of the output format.
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned, null-terminated string - print verbatim.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Only written when known (non-zero), to keep the dump compact.
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6516 
    6517 #endif
    6518 
    6519 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6520 {
    6521  VMA_ASSERT(IsUserDataString());
    6522  if(m_pUserData != VMA_NULL)
    6523  {
    6524  char* const oldStr = (char*)m_pUserData;
    6525  const size_t oldStrLen = strlen(oldStr);
    6526  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6527  m_pUserData = VMA_NULL;
    6528  }
    6529 }
    6530 
    6531 void VmaAllocation_T::BlockAllocMap()
    6532 {
    6533  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6534 
    6535  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6536  {
    6537  ++m_MapCount;
    6538  }
    6539  else
    6540  {
    6541  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6542  }
    6543 }
    6544 
    6545 void VmaAllocation_T::BlockAllocUnmap()
    6546 {
    6547  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6548 
    6549  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6550  {
    6551  --m_MapCount;
    6552  }
    6553  else
    6554  {
    6555  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6556  }
    6557 }
    6558 
    6559 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    6560 {
    6561  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6562 
    6563  if(m_MapCount != 0)
    6564  {
    6565  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6566  {
    6567  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    6568  *ppData = m_DedicatedAllocation.m_pMappedData;
    6569  ++m_MapCount;
    6570  return VK_SUCCESS;
    6571  }
    6572  else
    6573  {
    6574  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    6575  return VK_ERROR_MEMORY_MAP_FAILED;
    6576  }
    6577  }
    6578  else
    6579  {
    6580  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    6581  hAllocator->m_hDevice,
    6582  m_DedicatedAllocation.m_hMemory,
    6583  0, // offset
    6584  VK_WHOLE_SIZE,
    6585  0, // flags
    6586  ppData);
    6587  if(result == VK_SUCCESS)
    6588  {
    6589  m_DedicatedAllocation.m_pMappedData = *ppData;
    6590  m_MapCount = 1;
    6591  }
    6592  return result;
    6593  }
    6594 }
    6595 
    6596 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    6597 {
    6598  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    6599 
    6600  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6601  {
    6602  --m_MapCount;
    6603  if(m_MapCount == 0)
    6604  {
    6605  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    6606  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    6607  hAllocator->m_hDevice,
    6608  m_DedicatedAllocation.m_hMemory);
    6609  }
    6610  }
    6611  else
    6612  {
    6613  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    6614  }
    6615 }
    6616 
    6617 #if VMA_STATS_STRING_ENABLED
    6618 
// Serializes one VmaStatInfo as a JSON object. The min/avg/max sub-objects
// are only emitted when there is more than one allocation / unused range,
// since with 0 or 1 entries they carry no extra information.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true); // single-line object
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true); // single-line object
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6666 
    6667 #endif // #if VMA_STATS_STRING_ENABLED
    6668 
// Comparator for keeping a vector of free-suballocation iterators sorted by
// suballocation size, ascending. The iterator/size overload allows binary
// search (e.g. VmaBinaryFindFirstNotLess) against a raw VkDeviceSize without
// constructing an iterator.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6684 
    6685 
    6687 // class VmaBlockMetadata
    6688 
// Base metadata constructor: size is set later via Init(); only the
// allocation callbacks are captured from the allocator here.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6694 
    6695 #if VMA_STATS_STRING_ENABLED
    6696 
// Opens the JSON object for a detailed block dump: writes the summary
// counters, then begins the "Suballocations" array. Must be paired with
// PrintDetailedMap_End() after the per-suballocation entries.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6719 
// Writes one used suballocation as a single-line JSON object: its offset
// plus the allocation's own parameters (type, size, user data, ...).
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6733 
// Writes one free range as a single-line JSON object with type "FREE",
// mirroring the shape of the used-allocation entries.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6751 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6757 
    6758 #endif // #if VMA_STATS_STRING_ENABLED
    6759 
    6761 // class VmaBlockMetadata_Generic
    6762 
// Constructs empty generic metadata. Both containers use the allocator's
// callbacks via VmaStlAllocator; real state is established in Init().
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
    6771 
// Intentionally empty: members clean up via their own destructors.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6775 
    6776 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    6777 {
    6778  VmaBlockMetadata::Init(size);
    6779 
    6780  m_FreeCount = 1;
    6781  m_SumFreeSize = size;
    6782 
    6783  VmaSuballocation suballoc = {};
    6784  suballoc.offset = 0;
    6785  suballoc.size = size;
    6786  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    6787  suballoc.hAllocation = VK_NULL_HANDLE;
    6788 
    6789  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    6790  m_Suballocations.push_back(suballoc);
    6791  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    6792  --suballocItem;
    6793  m_FreeSuballocationsBySize.push_back(suballocItem);
    6794 }
    6795 
// Exhaustively checks internal invariants of this block's metadata:
// contiguous coverage of the whole block, no adjacent free ranges, debug
// margins respected, and consistency between the suballocation list, the
// by-size vector, and the cached counters. Returns true when consistent;
// VMA_VALIDATE reports and fails on the first violated condition.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // FREE ranges have no allocation handle; used ranges must have one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the list's view of it.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    // The by-size vector must contain only FREE ranges, sorted ascending.
    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculacted values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6877 
    6878 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6879 {
    6880  if(!m_FreeSuballocationsBySize.empty())
    6881  {
    6882  return m_FreeSuballocationsBySize.back()->size;
    6883  }
    6884  else
    6885  {
    6886  return 0;
    6887  }
    6888 }
    6889 
    6890 bool VmaBlockMetadata_Generic::IsEmpty() const
    6891 {
    6892  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6893 }
    6894 
// Fills outInfo with statistics for this single block. Min fields start at
// UINT64_MAX and max fields at 0 so that an aggregator can merge results;
// they remain at those sentinels when the block has no allocations or no
// free ranges respectively.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    // Single pass: classify each suballocation as used or free and track
    // min/max sizes per category.
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
    6928 
    6929 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6930 {
    6931  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6932 
    6933  inoutStats.size += GetSize();
    6934  inoutStats.unusedSize += m_SumFreeSize;
    6935  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6936  inoutStats.unusedRangeCount += m_FreeCount;
    6937  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6938 }
    6939 
    6940 #if VMA_STATS_STRING_ENABLED
    6941 
    6942 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6943 {
    6944  PrintDetailedMap_Begin(json,
    6945  m_SumFreeSize, // unusedBytes
    6946  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6947  m_FreeCount); // unusedRangeCount
    6948 
    6949  size_t i = 0;
    6950  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6951  suballocItem != m_Suballocations.cend();
    6952  ++suballocItem, ++i)
    6953  {
    6954  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6955  {
    6956  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6957  }
    6958  else
    6959  {
    6960  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6961  }
    6962  }
    6963 
    6964  PrintDetailedMap_End(json);
    6965 }
    6966 
    6967 #endif // #if VMA_STATS_STRING_ENABLED
    6968 
    6969 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6970  uint32_t currentFrameIndex,
    6971  uint32_t frameInUseCount,
    6972  VkDeviceSize bufferImageGranularity,
    6973  VkDeviceSize allocSize,
    6974  VkDeviceSize allocAlignment,
    6975  bool upperAddress,
    6976  VmaSuballocationType allocType,
    6977  bool canMakeOtherLost,
    6978  uint32_t strategy,
    6979  VmaAllocationRequest* pAllocationRequest)
    6980 {
    6981  VMA_ASSERT(allocSize > 0);
    6982  VMA_ASSERT(!upperAddress);
    6983  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6984  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6985  VMA_HEAVY_ASSERT(Validate());
    6986 
    6987  // There is not enough total free space in this block to fullfill the request: Early return.
    6988  if(canMakeOtherLost == false &&
    6989  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6990  {
    6991  return false;
    6992  }
    6993 
    6994  // New algorithm, efficiently searching freeSuballocationsBySize.
    6995  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6996  if(freeSuballocCount > 0)
    6997  {
    6999  {
    7000  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7001  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7002  m_FreeSuballocationsBySize.data(),
    7003  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7004  allocSize + 2 * VMA_DEBUG_MARGIN,
    7005  VmaSuballocationItemSizeLess());
    7006  size_t index = it - m_FreeSuballocationsBySize.data();
    7007  for(; index < freeSuballocCount; ++index)
    7008  {
    7009  if(CheckAllocation(
    7010  currentFrameIndex,
    7011  frameInUseCount,
    7012  bufferImageGranularity,
    7013  allocSize,
    7014  allocAlignment,
    7015  allocType,
    7016  m_FreeSuballocationsBySize[index],
    7017  false, // canMakeOtherLost
    7018  &pAllocationRequest->offset,
    7019  &pAllocationRequest->itemsToMakeLostCount,
    7020  &pAllocationRequest->sumFreeSize,
    7021  &pAllocationRequest->sumItemSize))
    7022  {
    7023  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7024  return true;
    7025  }
    7026  }
    7027  }
    7028  else // WORST_FIT, FIRST_FIT
    7029  {
    7030  // Search staring from biggest suballocations.
    7031  for(size_t index = freeSuballocCount; index--; )
    7032  {
    7033  if(CheckAllocation(
    7034  currentFrameIndex,
    7035  frameInUseCount,
    7036  bufferImageGranularity,
    7037  allocSize,
    7038  allocAlignment,
    7039  allocType,
    7040  m_FreeSuballocationsBySize[index],
    7041  false, // canMakeOtherLost
    7042  &pAllocationRequest->offset,
    7043  &pAllocationRequest->itemsToMakeLostCount,
    7044  &pAllocationRequest->sumFreeSize,
    7045  &pAllocationRequest->sumItemSize))
    7046  {
    7047  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7048  return true;
    7049  }
    7050  }
    7051  }
    7052  }
    7053 
    7054  if(canMakeOtherLost)
    7055  {
    7056  // Brute-force algorithm. TODO: Come up with something better.
    7057 
    7058  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7059  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7060 
    7061  VmaAllocationRequest tmpAllocRequest = {};
    7062  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7063  suballocIt != m_Suballocations.end();
    7064  ++suballocIt)
    7065  {
    7066  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7067  suballocIt->hAllocation->CanBecomeLost())
    7068  {
    7069  if(CheckAllocation(
    7070  currentFrameIndex,
    7071  frameInUseCount,
    7072  bufferImageGranularity,
    7073  allocSize,
    7074  allocAlignment,
    7075  allocType,
    7076  suballocIt,
    7077  canMakeOtherLost,
    7078  &tmpAllocRequest.offset,
    7079  &tmpAllocRequest.itemsToMakeLostCount,
    7080  &tmpAllocRequest.sumFreeSize,
    7081  &tmpAllocRequest.sumItemSize))
    7082  {
    7083  tmpAllocRequest.item = suballocIt;
    7084 
    7085  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7087  {
    7088  *pAllocationRequest = tmpAllocRequest;
    7089  }
    7090  }
    7091  }
    7092  }
    7093 
    7094  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7095  {
    7096  return true;
    7097  }
    7098  }
    7099 
    7100  return false;
    7101 }
    7102 
// Marks as lost the allocations that pAllocationRequest said must be
// sacrificed (itemsToMakeLostCount of them, starting at request->item).
// Each successfully lost item is freed and the request's item iterator is
// advanced past any resulting free range. Returns false if any required
// allocation could no longer be made lost (the request is then unusable).
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free range to reach the next allocation to sacrifice.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation merges neighbors and returns the merged free item.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7134 
// Attempts to make lost every allocation in this block that is eligible
// (CanBecomeLost() and old enough for the given frame window). Returns the
// number of allocations actually lost. Note: FreeSuballocation may merge
// adjacent free ranges; iterating from its return value keeps `it` valid.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    7152 
// Validates the magic-value guard bytes written in the VMA_DEBUG_MARGIN
// regions immediately before and after every used allocation in the mapped
// block data. Returns VK_SUCCESS, or VK_ERROR_VALIDATION_FAILED_EXT on the
// first corrupted margin found.
VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
{
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            // Guard region preceding the allocation.
            if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            // Guard region following the allocation.
            if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
            {
                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
                return VK_ERROR_VALIDATION_FAILED_EXT;
            }
        }
    }

    return VK_SUCCESS;
}
    7176 
// Commits a previously found allocation request: converts the chosen free
// suballocation into a used one for hAllocation, and splits off any leftover
// space before/after it as new free suballocations. Statement order matters:
// the free item must be unregistered from the by-size vector BEFORE its
// fields are rewritten.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals: the consumed free range is gone, each nonzero padding
    // adds one new free range back.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7242 
    7243 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7244 {
    7245  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7246  suballocItem != m_Suballocations.end();
    7247  ++suballocItem)
    7248  {
    7249  VmaSuballocation& suballoc = *suballocItem;
    7250  if(suballoc.hAllocation == allocation)
    7251  {
    7252  FreeSuballocation(suballocItem);
    7253  VMA_HEAVY_ASSERT(Validate());
    7254  return;
    7255  }
    7256  }
    7257  VMA_ASSERT(0 && "Not found!");
    7258 }
    7259 
    7260 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7261 {
    7262  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7263  suballocItem != m_Suballocations.end();
    7264  ++suballocItem)
    7265  {
    7266  VmaSuballocation& suballoc = *suballocItem;
    7267  if(suballoc.offset == offset)
    7268  {
    7269  FreeSuballocation(suballocItem);
    7270  return;
    7271  }
    7272  }
    7273  VMA_ASSERT(0 && "Not found!");
    7274 }
    7275 
/*
Tries to change the size of an existing allocation in place.
- Shrinking always succeeds: the reclaimed tail is merged into a following
  free suballocation, or a new free suballocation is created after this one.
- Growing succeeds only if the immediately following suballocation is free
  and large enough (including VMA_DEBUG_MARGIN); otherwise returns false.
Returns false also if `alloc` is not found in this block.
Note: the caller updates the allocation object itself to the new size, which
is why Validate() cannot be called here.
*/
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    // Linear search for the suballocation that belongs to `alloc`.
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister/re-register because its size changes and
                        // m_FreeSuballocationsBySize is sorted by size.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7402 
    7403 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7404 {
    7405  VkDeviceSize lastSize = 0;
    7406  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7407  {
    7408  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7409 
    7410  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7411  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7412  VMA_VALIDATE(it->size >= lastSize);
    7413  lastSize = it->size;
    7414  }
    7415  return true;
    7416 }
    7417 
/*
Checks whether a new allocation of given size/alignment/type can be placed
starting at `suballocItem`, writing the resulting offset to *pOffset.

Two modes:
- canMakeOtherLost == false: `suballocItem` must be a single free
  suballocation large enough for the request (plus margins/alignment).
- canMakeOtherLost == true: the request may span multiple consecutive
  suballocations; occupied ones are counted into *itemsToMakeLostCount if
  they can become lost (based on currentFrameIndex/frameInUseCount),
  otherwise the function fails.

In both modes, bufferImageGranularity conflicts with previous suballocations
may force a bigger alignment, and conflicts with following suballocations
either fail the request or (in lost mode) add to *itemsToMakeLostCount.
Returns true on success with all output parameters filled.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    // Outputs start zeroed; they accumulate as suballocations are examined.
    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Account the starting suballocation: free space, or an item that
        // would have to be made lost.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            // Walk backward only while previous suballocations share a
            // granularity page with the candidate offset.
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple mode: the request must fit entirely inside this one free
        // suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7691 
    7692 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7693 {
    7694  VMA_ASSERT(item != m_Suballocations.end());
    7695  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7696 
    7697  VmaSuballocationList::iterator nextItem = item;
    7698  ++nextItem;
    7699  VMA_ASSERT(nextItem != m_Suballocations.end());
    7700  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7701 
    7702  item->size += nextItem->size;
    7703  --m_FreeCount;
    7704  m_Suballocations.erase(nextItem);
    7705 }
    7706 
    7707 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    7708 {
    7709  // Change this suballocation to be marked as free.
    7710  VmaSuballocation& suballoc = *suballocItem;
    7711  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7712  suballoc.hAllocation = VK_NULL_HANDLE;
    7713 
    7714  // Update totals.
    7715  ++m_FreeCount;
    7716  m_SumFreeSize += suballoc.size;
    7717 
    7718  // Merge with previous and/or next suballocation if it's also free.
    7719  bool mergeWithNext = false;
    7720  bool mergeWithPrev = false;
    7721 
    7722  VmaSuballocationList::iterator nextItem = suballocItem;
    7723  ++nextItem;
    7724  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    7725  {
    7726  mergeWithNext = true;
    7727  }
    7728 
    7729  VmaSuballocationList::iterator prevItem = suballocItem;
    7730  if(suballocItem != m_Suballocations.begin())
    7731  {
    7732  --prevItem;
    7733  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7734  {
    7735  mergeWithPrev = true;
    7736  }
    7737  }
    7738 
    7739  if(mergeWithNext)
    7740  {
    7741  UnregisterFreeSuballocation(nextItem);
    7742  MergeFreeWithNext(suballocItem);
    7743  }
    7744 
    7745  if(mergeWithPrev)
    7746  {
    7747  UnregisterFreeSuballocation(prevItem);
    7748  MergeFreeWithNext(prevItem);
    7749  RegisterFreeSuballocation(prevItem);
    7750  return prevItem;
    7751  }
    7752  else
    7753  {
    7754  RegisterFreeSuballocation(suballocItem);
    7755  return suballocItem;
    7756  }
    7757 }
    7758 
    7759 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7760 {
    7761  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7762  VMA_ASSERT(item->size > 0);
    7763 
    7764  // You may want to enable this validation at the beginning or at the end of
    7765  // this function, depending on what do you want to check.
    7766  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7767 
    7768  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7769  {
    7770  if(m_FreeSuballocationsBySize.empty())
    7771  {
    7772  m_FreeSuballocationsBySize.push_back(item);
    7773  }
    7774  else
    7775  {
    7776  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7777  }
    7778  }
    7779 
    7780  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7781 }
    7782 
    7783 
// Removes a free suballocation from m_FreeSuballocationsBySize. Items below
// the registration threshold were never added there, so they are skipped.
// Because the vector is sorted by size (not identity), a binary search finds
// the first entry of equal size, then a linear scan over the equal-size run
// locates the exact iterator.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary search for the first registered entry whose size is not less
        // than item's size.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        // Scan forward through the run of equal-size entries for the exact one.
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // If sizes stopped matching, the item was never registered - a bug.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7816 
    7818 // class VmaBlockMetadata_Linear
    7819 
// Constructs empty linear metadata: both suballocation vectors are empty,
// the 2nd vector is unused (SECOND_VECTOR_EMPTY), and no null (freed) items
// are tracked yet. Both vectors use the allocator's allocation callbacks.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7832 
    7833 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
    7834 {
    7835 }
    7836 
// Initializes metadata for a block of the given size. Initially the whole
// block is free, so the free-size total equals the block size.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7842 
/*
Validates internal consistency of the linear metadata:
- the 2nd vector is non-empty exactly when a 2nd-vector mode is active;
- null (freed) items at the edges of the vectors have been trimmed;
- null-item counters match the actual number of null items;
- suballocation offsets are strictly increasing in allocation order
  (2nd vector first in ring-buffer mode, 2nd vector last in double-stack
  mode), each separated by at least VMA_DEBUG_MARGIN;
- every live suballocation agrees with its VmaAllocation's offset/size;
- m_SumFreeSize equals block size minus the sum of live allocation sizes.
Returns true if everything holds (VMA_VALIDATE returns false otherwise).
*/
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // `offset` tracks the minimum offset the next suballocation may start at.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the beginning of the block,
    // so it is walked first, in forward order.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading items of the 1st vector must all be null (freed) placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE: always true here since the loop starts at m_1stNullItemsBeginCount.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the end of the
    // block, so it is walked last, in reverse order (ascending offsets).
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7969 
    7970 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7971 {
    7972  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7973  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7974 }
    7975 
    7976 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7977 {
    7978  const VkDeviceSize size = GetSize();
    7979 
    7980  /*
    7981  We don't consider gaps inside allocation vectors with freed allocations because
    7982  they are not suitable for reuse in linear allocator. We consider only space that
    7983  is available for new allocations.
    7984  */
    7985  if(IsEmpty())
    7986  {
    7987  return size;
    7988  }
    7989 
    7990  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7991 
    7992  switch(m_2ndVectorMode)
    7993  {
    7994  case SECOND_VECTOR_EMPTY:
    7995  /*
    7996  Available space is after end of 1st, as well as before beginning of 1st (which
    7997  whould make it a ring buffer).
    7998  */
    7999  {
    8000  const size_t suballocations1stCount = suballocations1st.size();
    8001  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8002  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8003  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8004  return VMA_MAX(
    8005  firstSuballoc.offset,
    8006  size - (lastSuballoc.offset + lastSuballoc.size));
    8007  }
    8008  break;
    8009 
    8010  case SECOND_VECTOR_RING_BUFFER:
    8011  /*
    8012  Available space is only between end of 2nd and beginning of 1st.
    8013  */
    8014  {
    8015  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8016  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8017  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8018  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8019  }
    8020  break;
    8021 
    8022  case SECOND_VECTOR_DOUBLE_STACK:
    8023  /*
    8024  Available space is only between end of 1st and top of 2nd.
    8025  */
    8026  {
    8027  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8028  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8029  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8030  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8031  }
    8032  break;
    8033 
    8034  default:
    8035  VMA_ASSERT(0);
    8036  return 0;
    8037  }
    8038 }
    8039 
    8040 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8041 {
    8042  const VkDeviceSize size = GetSize();
    8043  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8044  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8045  const size_t suballoc1stCount = suballocations1st.size();
    8046  const size_t suballoc2ndCount = suballocations2nd.size();
    8047 
    8048  outInfo.blockCount = 1;
    8049  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8050  outInfo.unusedRangeCount = 0;
    8051  outInfo.usedBytes = 0;
    8052  outInfo.allocationSizeMin = UINT64_MAX;
    8053  outInfo.allocationSizeMax = 0;
    8054  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8055  outInfo.unusedRangeSizeMax = 0;
    8056 
    8057  VkDeviceSize lastOffset = 0;
    8058 
    8059  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8060  {
    8061  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8062  size_t nextAlloc2ndIndex = 0;
    8063  while(lastOffset < freeSpace2ndTo1stEnd)
    8064  {
    8065  // Find next non-null allocation or move nextAllocIndex to the end.
    8066  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8067  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8068  {
    8069  ++nextAlloc2ndIndex;
    8070  }
    8071 
    8072  // Found non-null allocation.
    8073  if(nextAlloc2ndIndex < suballoc2ndCount)
    8074  {
    8075  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8076 
    8077  // 1. Process free space before this allocation.
    8078  if(lastOffset < suballoc.offset)
    8079  {
    8080  // There is free space from lastOffset to suballoc.offset.
    8081  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8082  ++outInfo.unusedRangeCount;
    8083  outInfo.unusedBytes += unusedRangeSize;
    8084  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8085  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8086  }
    8087 
    8088  // 2. Process this allocation.
    8089  // There is allocation with suballoc.offset, suballoc.size.
    8090  outInfo.usedBytes += suballoc.size;
    8091  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8092  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8093 
    8094  // 3. Prepare for next iteration.
    8095  lastOffset = suballoc.offset + suballoc.size;
    8096  ++nextAlloc2ndIndex;
    8097  }
    8098  // We are at the end.
    8099  else
    8100  {
    8101  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8102  if(lastOffset < freeSpace2ndTo1stEnd)
    8103  {
    8104  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8105  ++outInfo.unusedRangeCount;
    8106  outInfo.unusedBytes += unusedRangeSize;
    8107  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8108  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8109  }
    8110 
    8111  // End of loop.
    8112  lastOffset = freeSpace2ndTo1stEnd;
    8113  }
    8114  }
    8115  }
    8116 
    8117  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8118  const VkDeviceSize freeSpace1stTo2ndEnd =
    8119  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8120  while(lastOffset < freeSpace1stTo2ndEnd)
    8121  {
    8122  // Find next non-null allocation or move nextAllocIndex to the end.
    8123  while(nextAlloc1stIndex < suballoc1stCount &&
    8124  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8125  {
    8126  ++nextAlloc1stIndex;
    8127  }
    8128 
    8129  // Found non-null allocation.
    8130  if(nextAlloc1stIndex < suballoc1stCount)
    8131  {
    8132  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8133 
    8134  // 1. Process free space before this allocation.
    8135  if(lastOffset < suballoc.offset)
    8136  {
    8137  // There is free space from lastOffset to suballoc.offset.
    8138  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8139  ++outInfo.unusedRangeCount;
    8140  outInfo.unusedBytes += unusedRangeSize;
    8141  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8142  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8143  }
    8144 
    8145  // 2. Process this allocation.
    8146  // There is allocation with suballoc.offset, suballoc.size.
    8147  outInfo.usedBytes += suballoc.size;
    8148  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8149  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8150 
    8151  // 3. Prepare for next iteration.
    8152  lastOffset = suballoc.offset + suballoc.size;
    8153  ++nextAlloc1stIndex;
    8154  }
    8155  // We are at the end.
    8156  else
    8157  {
    8158  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8159  if(lastOffset < freeSpace1stTo2ndEnd)
    8160  {
    8161  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8162  ++outInfo.unusedRangeCount;
    8163  outInfo.unusedBytes += unusedRangeSize;
    8164  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8165  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8166  }
    8167 
    8168  // End of loop.
    8169  lastOffset = freeSpace1stTo2ndEnd;
    8170  }
    8171  }
    8172 
    8173  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8174  {
    8175  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8176  while(lastOffset < size)
    8177  {
    8178  // Find next non-null allocation or move nextAllocIndex to the end.
    8179  while(nextAlloc2ndIndex != SIZE_MAX &&
    8180  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8181  {
    8182  --nextAlloc2ndIndex;
    8183  }
    8184 
    8185  // Found non-null allocation.
    8186  if(nextAlloc2ndIndex != SIZE_MAX)
    8187  {
    8188  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8189 
    8190  // 1. Process free space before this allocation.
    8191  if(lastOffset < suballoc.offset)
    8192  {
    8193  // There is free space from lastOffset to suballoc.offset.
    8194  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8195  ++outInfo.unusedRangeCount;
    8196  outInfo.unusedBytes += unusedRangeSize;
    8197  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8198  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8199  }
    8200 
    8201  // 2. Process this allocation.
    8202  // There is allocation with suballoc.offset, suballoc.size.
    8203  outInfo.usedBytes += suballoc.size;
    8204  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8205  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8206 
    8207  // 3. Prepare for next iteration.
    8208  lastOffset = suballoc.offset + suballoc.size;
    8209  --nextAlloc2ndIndex;
    8210  }
    8211  // We are at the end.
    8212  else
    8213  {
    8214  // There is free space from lastOffset to size.
    8215  if(lastOffset < size)
    8216  {
    8217  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8218  ++outInfo.unusedRangeCount;
    8219  outInfo.unusedBytes += unusedRangeSize;
    8220  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8221  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8222  }
    8223 
    8224  // End of loop.
    8225  lastOffset = size;
    8226  }
    8227  }
    8228  }
    8229 
    8230  outInfo.unusedBytes = size - outInfo.usedBytes;
    8231 }
    8232 
    8233 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8234 {
    8235  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8236  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8237  const VkDeviceSize size = GetSize();
    8238  const size_t suballoc1stCount = suballocations1st.size();
    8239  const size_t suballoc2ndCount = suballocations2nd.size();
    8240 
    8241  inoutStats.size += size;
    8242 
    8243  VkDeviceSize lastOffset = 0;
    8244 
    8245  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8246  {
    8247  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8248  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8249  while(lastOffset < freeSpace2ndTo1stEnd)
    8250  {
    8251  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8252  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8253  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8254  {
    8255  ++nextAlloc2ndIndex;
    8256  }
    8257 
    8258  // Found non-null allocation.
    8259  if(nextAlloc2ndIndex < suballoc2ndCount)
    8260  {
    8261  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8262 
    8263  // 1. Process free space before this allocation.
    8264  if(lastOffset < suballoc.offset)
    8265  {
    8266  // There is free space from lastOffset to suballoc.offset.
    8267  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8268  inoutStats.unusedSize += unusedRangeSize;
    8269  ++inoutStats.unusedRangeCount;
    8270  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8271  }
    8272 
    8273  // 2. Process this allocation.
    8274  // There is allocation with suballoc.offset, suballoc.size.
    8275  ++inoutStats.allocationCount;
    8276 
    8277  // 3. Prepare for next iteration.
    8278  lastOffset = suballoc.offset + suballoc.size;
    8279  ++nextAlloc2ndIndex;
    8280  }
    8281  // We are at the end.
    8282  else
    8283  {
    8284  if(lastOffset < freeSpace2ndTo1stEnd)
    8285  {
    8286  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8287  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8288  inoutStats.unusedSize += unusedRangeSize;
    8289  ++inoutStats.unusedRangeCount;
    8290  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8291  }
    8292 
    8293  // End of loop.
    8294  lastOffset = freeSpace2ndTo1stEnd;
    8295  }
    8296  }
    8297  }
    8298 
    8299  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8300  const VkDeviceSize freeSpace1stTo2ndEnd =
    8301  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8302  while(lastOffset < freeSpace1stTo2ndEnd)
    8303  {
    8304  // Find next non-null allocation or move nextAllocIndex to the end.
    8305  while(nextAlloc1stIndex < suballoc1stCount &&
    8306  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8307  {
    8308  ++nextAlloc1stIndex;
    8309  }
    8310 
    8311  // Found non-null allocation.
    8312  if(nextAlloc1stIndex < suballoc1stCount)
    8313  {
    8314  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8315 
    8316  // 1. Process free space before this allocation.
    8317  if(lastOffset < suballoc.offset)
    8318  {
    8319  // There is free space from lastOffset to suballoc.offset.
    8320  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8321  inoutStats.unusedSize += unusedRangeSize;
    8322  ++inoutStats.unusedRangeCount;
    8323  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8324  }
    8325 
    8326  // 2. Process this allocation.
    8327  // There is allocation with suballoc.offset, suballoc.size.
    8328  ++inoutStats.allocationCount;
    8329 
    8330  // 3. Prepare for next iteration.
    8331  lastOffset = suballoc.offset + suballoc.size;
    8332  ++nextAlloc1stIndex;
    8333  }
    8334  // We are at the end.
    8335  else
    8336  {
    8337  if(lastOffset < freeSpace1stTo2ndEnd)
    8338  {
    8339  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8340  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8341  inoutStats.unusedSize += unusedRangeSize;
    8342  ++inoutStats.unusedRangeCount;
    8343  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8344  }
    8345 
    8346  // End of loop.
    8347  lastOffset = freeSpace1stTo2ndEnd;
    8348  }
    8349  }
    8350 
    8351  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8352  {
    8353  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8354  while(lastOffset < size)
    8355  {
    8356  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8357  while(nextAlloc2ndIndex != SIZE_MAX &&
    8358  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8359  {
    8360  --nextAlloc2ndIndex;
    8361  }
    8362 
    8363  // Found non-null allocation.
    8364  if(nextAlloc2ndIndex != SIZE_MAX)
    8365  {
    8366  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8367 
    8368  // 1. Process free space before this allocation.
    8369  if(lastOffset < suballoc.offset)
    8370  {
    8371  // There is free space from lastOffset to suballoc.offset.
    8372  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8373  inoutStats.unusedSize += unusedRangeSize;
    8374  ++inoutStats.unusedRangeCount;
    8375  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8376  }
    8377 
    8378  // 2. Process this allocation.
    8379  // There is allocation with suballoc.offset, suballoc.size.
    8380  ++inoutStats.allocationCount;
    8381 
    8382  // 3. Prepare for next iteration.
    8383  lastOffset = suballoc.offset + suballoc.size;
    8384  --nextAlloc2ndIndex;
    8385  }
    8386  // We are at the end.
    8387  else
    8388  {
    8389  if(lastOffset < size)
    8390  {
    8391  // There is free space from lastOffset to size.
    8392  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8393  inoutStats.unusedSize += unusedRangeSize;
    8394  ++inoutStats.unusedRangeCount;
    8395  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8396  }
    8397 
    8398  // End of loop.
    8399  lastOffset = size;
    8400  }
    8401  }
    8402  }
    8403 }
    8404 
    8405 #if VMA_STATS_STRING_ENABLED
// Writes a detailed JSON dump of this block's layout via `json`.
//
// FIRST PASS walks the block only to count allocations, used bytes, and
// unused ranges, because PrintDetailedMap_Begin needs those totals up front.
// SECOND PASS walks the block again in the same order and emits each
// allocation and unused range.
//
// Both passes visit up to three regions in ascending address order:
//   1. Ring-buffer mode: the 2nd vector, which occupies the low offsets up to
//      the first used item of the 1st vector.
//   2. The 1st vector, up to the end of the block or - in double-stack mode -
//      up to the bottom of the upper stack (suballocations2nd.back().offset).
//   3. Double-stack mode: the 2nd vector iterated backwards (its last element
//      has the lowest offset), up to the end of the block.
// Null items (hAllocation == VK_NULL_HANDLE) are freed slots; they are
// skipped and the gaps they leave are reported as unused ranges.
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    // End offset of the last region (allocation or unused range) processed so far.
    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // The 2nd vector's region ends where the first used item of the 1st vector begins.
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    // In double-stack mode the 1st vector's region ends where the upper stack
    // begins; otherwise it extends to the end of the block.
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): this guard uses `size` while the matching SECOND
            // PASS loop below guards on `freeSpace1stTo2ndEnd`. Inside this
            // loop lastOffset < freeSpace1stTo2ndEnd <= size, so both forms
            // behave identically; kept as-is for byte fidelity.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Walk the upper stack backwards: the last element of suballocations2nd
        // has the lowest offset. nextAlloc2ndIndex wraps to SIZE_MAX when the
        // front is passed.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    // Re-walk the exact same three regions, this time emitting JSON entries
    // instead of counting. The counts from the first pass must match the
    // number of entries emitted here.
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Walk the upper stack backwards, same as in the first pass.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
    8720 #endif // #if VMA_STATS_STRING_ENABLED
    8721 
    8722 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8723  uint32_t currentFrameIndex,
    8724  uint32_t frameInUseCount,
    8725  VkDeviceSize bufferImageGranularity,
    8726  VkDeviceSize allocSize,
    8727  VkDeviceSize allocAlignment,
    8728  bool upperAddress,
    8729  VmaSuballocationType allocType,
    8730  bool canMakeOtherLost,
    8731  uint32_t strategy,
    8732  VmaAllocationRequest* pAllocationRequest)
    8733 {
    8734  VMA_ASSERT(allocSize > 0);
    8735  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8736  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8737  VMA_HEAVY_ASSERT(Validate());
    8738 
    8739  const VkDeviceSize size = GetSize();
    8740  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8741  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8742 
    8743  if(upperAddress)
    8744  {
    8745  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8746  {
    8747  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8748  return false;
    8749  }
    8750 
    8751  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8752  if(allocSize > size)
    8753  {
    8754  return false;
    8755  }
    8756  VkDeviceSize resultBaseOffset = size - allocSize;
    8757  if(!suballocations2nd.empty())
    8758  {
    8759  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8760  resultBaseOffset = lastSuballoc.offset - allocSize;
    8761  if(allocSize > lastSuballoc.offset)
    8762  {
    8763  return false;
    8764  }
    8765  }
    8766 
    8767  // Start from offset equal to end of free space.
    8768  VkDeviceSize resultOffset = resultBaseOffset;
    8769 
    8770  // Apply VMA_DEBUG_MARGIN at the end.
    8771  if(VMA_DEBUG_MARGIN > 0)
    8772  {
    8773  if(resultOffset < VMA_DEBUG_MARGIN)
    8774  {
    8775  return false;
    8776  }
    8777  resultOffset -= VMA_DEBUG_MARGIN;
    8778  }
    8779 
    8780  // Apply alignment.
    8781  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8782 
    8783  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8784  // Make bigger alignment if necessary.
    8785  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8786  {
    8787  bool bufferImageGranularityConflict = false;
    8788  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8789  {
    8790  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8791  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8792  {
    8793  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8794  {
    8795  bufferImageGranularityConflict = true;
    8796  break;
    8797  }
    8798  }
    8799  else
    8800  // Already on previous page.
    8801  break;
    8802  }
    8803  if(bufferImageGranularityConflict)
    8804  {
    8805  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8806  }
    8807  }
    8808 
    8809  // There is enough free space.
    8810  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8811  suballocations1st.back().offset + suballocations1st.back().size :
    8812  0;
    8813  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8814  {
    8815  // Check previous suballocations for BufferImageGranularity conflicts.
    8816  // If conflict exists, allocation cannot be made here.
    8817  if(bufferImageGranularity > 1)
    8818  {
    8819  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8820  {
    8821  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8822  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8823  {
    8824  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8825  {
    8826  return false;
    8827  }
    8828  }
    8829  else
    8830  {
    8831  // Already on next page.
    8832  break;
    8833  }
    8834  }
    8835  }
    8836 
    8837  // All tests passed: Success.
    8838  pAllocationRequest->offset = resultOffset;
    8839  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8840  pAllocationRequest->sumItemSize = 0;
    8841  // pAllocationRequest->item unused.
    8842  pAllocationRequest->itemsToMakeLostCount = 0;
    8843  return true;
    8844  }
    8845  }
    8846  else // !upperAddress
    8847  {
    8848  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8849  {
    8850  // Try to allocate at the end of 1st vector.
    8851 
    8852  VkDeviceSize resultBaseOffset = 0;
    8853  if(!suballocations1st.empty())
    8854  {
    8855  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8856  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8857  }
    8858 
    8859  // Start from offset equal to beginning of free space.
    8860  VkDeviceSize resultOffset = resultBaseOffset;
    8861 
    8862  // Apply VMA_DEBUG_MARGIN at the beginning.
    8863  if(VMA_DEBUG_MARGIN > 0)
    8864  {
    8865  resultOffset += VMA_DEBUG_MARGIN;
    8866  }
    8867 
    8868  // Apply alignment.
    8869  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8870 
    8871  // Check previous suballocations for BufferImageGranularity conflicts.
    8872  // Make bigger alignment if necessary.
    8873  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8874  {
    8875  bool bufferImageGranularityConflict = false;
    8876  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8877  {
    8878  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8879  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8880  {
    8881  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8882  {
    8883  bufferImageGranularityConflict = true;
    8884  break;
    8885  }
    8886  }
    8887  else
    8888  // Already on previous page.
    8889  break;
    8890  }
    8891  if(bufferImageGranularityConflict)
    8892  {
    8893  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8894  }
    8895  }
    8896 
    8897  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8898  suballocations2nd.back().offset : size;
    8899 
    8900  // There is enough free space at the end after alignment.
    8901  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8902  {
    8903  // Check next suballocations for BufferImageGranularity conflicts.
    8904  // If conflict exists, allocation cannot be made here.
    8905  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8906  {
    8907  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8908  {
    8909  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8910  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8911  {
    8912  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8913  {
    8914  return false;
    8915  }
    8916  }
    8917  else
    8918  {
    8919  // Already on previous page.
    8920  break;
    8921  }
    8922  }
    8923  }
    8924 
    8925  // All tests passed: Success.
    8926  pAllocationRequest->offset = resultOffset;
    8927  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8928  pAllocationRequest->sumItemSize = 0;
    8929  // pAllocationRequest->item unused.
    8930  pAllocationRequest->itemsToMakeLostCount = 0;
    8931  return true;
    8932  }
    8933  }
    8934 
    8935  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8936  // beginning of 1st vector as the end of free space.
    8937  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8938  {
    8939  VMA_ASSERT(!suballocations1st.empty());
    8940 
    8941  VkDeviceSize resultBaseOffset = 0;
    8942  if(!suballocations2nd.empty())
    8943  {
    8944  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8945  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8946  }
    8947 
    8948  // Start from offset equal to beginning of free space.
    8949  VkDeviceSize resultOffset = resultBaseOffset;
    8950 
    8951  // Apply VMA_DEBUG_MARGIN at the beginning.
    8952  if(VMA_DEBUG_MARGIN > 0)
    8953  {
    8954  resultOffset += VMA_DEBUG_MARGIN;
    8955  }
    8956 
    8957  // Apply alignment.
    8958  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8959 
    8960  // Check previous suballocations for BufferImageGranularity conflicts.
    8961  // Make bigger alignment if necessary.
    8962  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8963  {
    8964  bool bufferImageGranularityConflict = false;
    8965  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8966  {
    8967  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8968  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8969  {
    8970  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8971  {
    8972  bufferImageGranularityConflict = true;
    8973  break;
    8974  }
    8975  }
    8976  else
    8977  // Already on previous page.
    8978  break;
    8979  }
    8980  if(bufferImageGranularityConflict)
    8981  {
    8982  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8983  }
    8984  }
    8985 
    8986  pAllocationRequest->itemsToMakeLostCount = 0;
    8987  pAllocationRequest->sumItemSize = 0;
    8988  size_t index1st = m_1stNullItemsBeginCount;
    8989 
    8990  if(canMakeOtherLost)
    8991  {
    8992  while(index1st < suballocations1st.size() &&
    8993  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8994  {
    8995  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8996  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8997  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8998  {
    8999  // No problem.
    9000  }
    9001  else
    9002  {
    9003  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9004  if(suballoc.hAllocation->CanBecomeLost() &&
    9005  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9006  {
    9007  ++pAllocationRequest->itemsToMakeLostCount;
    9008  pAllocationRequest->sumItemSize += suballoc.size;
    9009  }
    9010  else
    9011  {
    9012  return false;
    9013  }
    9014  }
    9015  ++index1st;
    9016  }
    9017 
    9018  // Check next suballocations for BufferImageGranularity conflicts.
    9019  // If conflict exists, we must mark more allocations lost or fail.
    9020  if(bufferImageGranularity > 1)
    9021  {
    9022  while(index1st < suballocations1st.size())
    9023  {
    9024  const VmaSuballocation& suballoc = suballocations1st[index1st];
    9025  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    9026  {
    9027  if(suballoc.hAllocation != VK_NULL_HANDLE)
    9028  {
    9029  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    9030  if(suballoc.hAllocation->CanBecomeLost() &&
    9031  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9032  {
    9033  ++pAllocationRequest->itemsToMakeLostCount;
    9034  pAllocationRequest->sumItemSize += suballoc.size;
    9035  }
    9036  else
    9037  {
    9038  return false;
    9039  }
    9040  }
    9041  }
    9042  else
    9043  {
    9044  // Already on next page.
    9045  break;
    9046  }
    9047  ++index1st;
    9048  }
    9049  }
    9050  }
    9051 
    9052  // There is enough free space at the end after alignment.
    9053  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    9054  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    9055  {
    9056  // Check next suballocations for BufferImageGranularity conflicts.
    9057  // If conflict exists, allocation cannot be made here.
    9058  if(bufferImageGranularity > 1)
    9059  {
    9060  for(size_t nextSuballocIndex = index1st;
    9061  nextSuballocIndex < suballocations1st.size();
    9062  nextSuballocIndex++)
    9063  {
    9064  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    9065  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9066  {
    9067  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9068  {
    9069  return false;
    9070  }
    9071  }
    9072  else
    9073  {
    9074  // Already on next page.
    9075  break;
    9076  }
    9077  }
    9078  }
    9079 
    9080  // All tests passed: Success.
    9081  pAllocationRequest->offset = resultOffset;
    9082  pAllocationRequest->sumFreeSize =
    9083  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    9084  - resultBaseOffset
    9085  - pAllocationRequest->sumItemSize;
    9086  // pAllocationRequest->item unused.
    9087  return true;
    9088  }
    9089  }
    9090  }
    9091 
    9092  return false;
    9093 }
    9094 
    9095 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    9096  uint32_t currentFrameIndex,
    9097  uint32_t frameInUseCount,
    9098  VmaAllocationRequest* pAllocationRequest)
    9099 {
    9100  if(pAllocationRequest->itemsToMakeLostCount == 0)
    9101  {
    9102  return true;
    9103  }
    9104 
    9105  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    9106 
    9107  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9108  size_t index1st = m_1stNullItemsBeginCount;
    9109  size_t madeLostCount = 0;
    9110  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    9111  {
    9112  VMA_ASSERT(index1st < suballocations1st.size());
    9113  VmaSuballocation& suballoc = suballocations1st[index1st];
    9114  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9115  {
    9116  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9117  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    9118  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9119  {
    9120  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9121  suballoc.hAllocation = VK_NULL_HANDLE;
    9122  m_SumFreeSize += suballoc.size;
    9123  ++m_1stNullItemsMiddleCount;
    9124  ++madeLostCount;
    9125  }
    9126  else
    9127  {
    9128  return false;
    9129  }
    9130  }
    9131  ++index1st;
    9132  }
    9133 
    9134  CleanupAfterFree();
    9135  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    9136 
    9137  return true;
    9138 }
    9139 
    9140 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9141 {
    9142  uint32_t lostAllocationCount = 0;
    9143 
    9144  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9145  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9146  {
    9147  VmaSuballocation& suballoc = suballocations1st[i];
    9148  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9149  suballoc.hAllocation->CanBecomeLost() &&
    9150  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9151  {
    9152  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9153  suballoc.hAllocation = VK_NULL_HANDLE;
    9154  ++m_1stNullItemsMiddleCount;
    9155  m_SumFreeSize += suballoc.size;
    9156  ++lostAllocationCount;
    9157  }
    9158  }
    9159 
    9160  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9161  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9162  {
    9163  VmaSuballocation& suballoc = suballocations2nd[i];
    9164  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9165  suballoc.hAllocation->CanBecomeLost() &&
    9166  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9167  {
    9168  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9169  suballoc.hAllocation = VK_NULL_HANDLE;
    9170  ++m_2ndNullItemsCount;
    9171  ++lostAllocationCount;
    9172  }
    9173  }
    9174 
    9175  if(lostAllocationCount)
    9176  {
    9177  CleanupAfterFree();
    9178  }
    9179 
    9180  return lostAllocationCount;
    9181 }
    9182 
    9183 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9184 {
    9185  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9186  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9187  {
    9188  const VmaSuballocation& suballoc = suballocations1st[i];
    9189  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9190  {
    9191  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9192  {
    9193  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9194  return VK_ERROR_VALIDATION_FAILED_EXT;
    9195  }
    9196  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9197  {
    9198  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9199  return VK_ERROR_VALIDATION_FAILED_EXT;
    9200  }
    9201  }
    9202  }
    9203 
    9204  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9205  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9206  {
    9207  const VmaSuballocation& suballoc = suballocations2nd[i];
    9208  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9209  {
    9210  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9211  {
    9212  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9213  return VK_ERROR_VALIDATION_FAILED_EXT;
    9214  }
    9215  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9216  {
    9217  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9218  return VK_ERROR_VALIDATION_FAILED_EXT;
    9219  }
    9220  }
    9221  }
    9222 
    9223  return VK_SUCCESS;
    9224 }
    9225 
// Commits a previously created allocation request into the metadata.
// Depending on request.offset, the new suballocation lands either at the end
// of the 1st vector, or in the 2nd vector (ring buffer / upper stack).
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocations go to the 2nd vector used as a stack
        // growing downward; this is mutually exclusive with ring-buffer mode.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    // Ring-buffer wraparound is incompatible with stack usage of the 2nd vector.
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset matches neither valid placement - the request is stale or corrupted.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9295 
    9296 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9297 {
    9298  FreeAtOffset(allocation->GetOffset());
    9299 }
    9300 
// Frees the suballocation that starts at the given offset.
// Tries cheap special cases first (first item of 1st vector, last item of the
// active vector), then falls back to binary search in the middle of either
// vector. Always ends with CleanupAfterFree() to restore invariants.
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 1st vector is sorted by offset ascending, so binary search applies.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer keeps offsets ascending; the upper stack keeps them descending,
        // hence the different comparator per mode.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9389 
    9390 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9391 {
    9392  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9393  const size_t suballocCount = AccessSuballocations1st().size();
    9394  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9395 }
    9396 
// Restores the class invariants after one or more items were freed:
// trims null items from the edges of both vectors, optionally compacts the
// 1st vector, and when the 1st vector drains completely, promotes the 2nd
// (ring-buffer) vector to become the new 1st vector.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations at all - reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // Each one moved from "middle" to "begin" accounting.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact in place: shift all non-null items to the front,
            // preserving their relative order, then drop the tail.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // The 2nd vector's null counters carry over to the new 1st vector,
                // then leading nulls are re-classified as "begin" nulls.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flip which underlying vector plays the role of "1st".
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9493 
    9494 
    9496 // class VmaBlockMetadata_Buddy
    9497 
// m_FreeCount starts at 1 because after Init() the metadata holds exactly one
// free node - the root spanning the whole usable size.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // Zero all per-level free lists (front/back pointers become null).
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9507 
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    // Recursively destroys the whole buddy tree (no-op when m_Root is null is
    // not handled here - Init() is expected to have run; DeleteNode dereferences its argument).
    DeleteNode(m_Root);
}
    9512 
    9513 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9514 {
    9515  VmaBlockMetadata::Init(size);
    9516 
    9517  m_UsableSize = VmaPrevPow2(size);
    9518  m_SumFreeSize = m_UsableSize;
    9519 
    9520  // Calculate m_LevelCount.
    9521  m_LevelCount = 1;
    9522  while(m_LevelCount < MAX_LEVELS &&
    9523  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9524  {
    9525  ++m_LevelCount;
    9526  }
    9527 
    9528  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9529  rootNode->offset = 0;
    9530  rootNode->type = Node::TYPE_FREE;
    9531  rootNode->parent = VMA_NULL;
    9532  rootNode->buddy = VMA_NULL;
    9533 
    9534  m_Root = rootNode;
    9535  AddToFreeListFront(0, rootNode);
    9536 }
    9537 
// Validates the entire buddy tree and all per-level free lists.
// Returns false (via VMA_VALIDATE) on the first broken invariant.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Counters accumulated while walking the tree must match cached totals.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists: correct back-links, correct tail pointer,
    // and only TYPE_FREE nodes present.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9580 
    9581 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9582 {
    9583  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9584  {
    9585  if(m_FreeList[level].front != VMA_NULL)
    9586  {
    9587  return LevelToNodeSize(level);
    9588  }
    9589  }
    9590  return 0;
    9591 }
    9592 
// Fills outInfo with statistics for this single block by walking the tree,
// then accounts for the unusable tail (the part of the block beyond the
// power-of-2 usable size) as one extra unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Min starts at max value so any real range shrinks it.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9616 
    9617 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9618 {
    9619  const VkDeviceSize unusableSize = GetUnusableSize();
    9620 
    9621  inoutStats.size += GetSize();
    9622  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9623  inoutStats.allocationCount += m_AllocationCount;
    9624  inoutStats.unusedRangeCount += m_FreeCount;
    9625  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9626 
    9627  if(unusableSize > 0)
    9628  {
    9629  ++inoutStats.unusedRangeCount;
    9630  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9631  }
    9632 }
    9633 
#if VMA_STATS_STRING_ENABLED

// Serializes the block's detailed memory map as JSON.
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize: stats are recomputed here only to feed the JSON header.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    // Emit the tree contents, then the unusable tail (if any) as a final
    // unused range so the whole block size is covered.
    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}

#endif // #if VMA_STATS_STRING_ENABLED
    9662 
// Searches the free lists for a node that can hold the requested allocation.
// Returns true and fills pAllocationRequest on success.
// Note: currentFrameIndex, frameInUseCount and canMakeOtherLost are accepted
// for interface compatibility but unused - lost allocations are not supported
// by the buddy allocator (see MakeRequestedAllocationsLost below).
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Search from the best-fit level (deepest that still fits allocSize)
    // upward toward the root: the loop runs level = targetLevel, ..., 0.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // The level is smuggled through customData to Alloc().
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9713 
    9714 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9715  uint32_t currentFrameIndex,
    9716  uint32_t frameInUseCount,
    9717  VmaAllocationRequest* pAllocationRequest)
    9718 {
    9719  /*
    9720  Lost allocations are not supported in buddy allocator at the moment.
    9721  Support might be added in the future.
    9722  */
    9723  return pAllocationRequest->itemsToMakeLostCount == 0;
    9724 }
    9725 
    9726 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9727 {
    9728  /*
    9729  Lost allocations are not supported in buddy allocator at the moment.
    9730  Support might be added in the future.
    9731  */
    9732  return 0;
    9733 }
    9734 
// Commits an allocation request produced by CreateAllocationRequest():
// finds the chosen free node, splits it repeatedly until reaching the
// target level, and converts the final node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // The level where the free node was found was stored in customData
    // by CreateAllocationRequest().
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the free node with the requested offset in that level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // leftChild is pushed last so it becomes the list front, which the
        // next iteration picks up below.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9809 
    9810 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9811 {
    9812  if(node->type == Node::TYPE_SPLIT)
    9813  {
    9814  DeleteNode(node->split.leftChild->buddy);
    9815  DeleteNode(node->split.leftChild);
    9816  }
    9817 
    9818  vma_delete(GetAllocationCallbacks(), node);
    9819 }
    9820 
    9821 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9822 {
    9823  VMA_VALIDATE(level < m_LevelCount);
    9824  VMA_VALIDATE(curr->parent == parent);
    9825  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9826  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9827  switch(curr->type)
    9828  {
    9829  case Node::TYPE_FREE:
    9830  // curr->free.prev, next are validated separately.
    9831  ctx.calculatedSumFreeSize += levelNodeSize;
    9832  ++ctx.calculatedFreeCount;
    9833  break;
    9834  case Node::TYPE_ALLOCATION:
    9835  ++ctx.calculatedAllocationCount;
    9836  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9837  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9838  break;
    9839  case Node::TYPE_SPLIT:
    9840  {
    9841  const uint32_t childrenLevel = level + 1;
    9842  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9843  const Node* const leftChild = curr->split.leftChild;
    9844  VMA_VALIDATE(leftChild != VMA_NULL);
    9845  VMA_VALIDATE(leftChild->offset == curr->offset);
    9846  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9847  {
    9848  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9849  }
    9850  const Node* const rightChild = leftChild->buddy;
    9851  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9852  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9853  {
    9854  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9855  }
    9856  }
    9857  break;
    9858  default:
    9859  return false;
    9860  }
    9861 
    9862  return true;
    9863 }
    9864 
    9865 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9866 {
    9867  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9868  uint32_t level = 0;
    9869  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9870  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9871  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9872  {
    9873  ++level;
    9874  currLevelNodeSize = nextLevelNodeSize;
    9875  nextLevelNodeSize = currLevelNodeSize >> 1;
    9876  }
    9877  return level;
    9878 }
    9879 
// Frees the allocation located at `offset`, then merges the freed node with
// its buddy repeatedly while both halves of a split are free.
// `alloc` may be VK_NULL_HANDLE; when given, it is cross-checked against the node.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level: walk from the root, choosing the half that
    // contains `offset`, until a non-split (leaf) node is reached.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Offset lies in the left half.
            node = node->split.leftChild;
        }
        else
        {
            // Offset lies in the right half - the left child's buddy.
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible: while the buddy is also free, delete both
    // children and turn the parent into a single free node one level up.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    // The (possibly merged) free node becomes available for future allocations.
    AddToFreeListFront(level, node);
}
    9930 
    9931 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9932 {
    9933  switch(node->type)
    9934  {
    9935  case Node::TYPE_FREE:
    9936  ++outInfo.unusedRangeCount;
    9937  outInfo.unusedBytes += levelNodeSize;
    9938  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9939  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9940  break;
    9941  case Node::TYPE_ALLOCATION:
    9942  {
    9943  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9944  ++outInfo.allocationCount;
    9945  outInfo.usedBytes += allocSize;
    9946  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9947  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9948 
    9949  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9950  if(unusedRangeSize > 0)
    9951  {
    9952  ++outInfo.unusedRangeCount;
    9953  outInfo.unusedBytes += unusedRangeSize;
    9954  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9955  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9956  }
    9957  }
    9958  break;
    9959  case Node::TYPE_SPLIT:
    9960  {
    9961  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9962  const Node* const leftChild = node->split.leftChild;
    9963  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9964  const Node* const rightChild = leftChild->buddy;
    9965  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9966  }
    9967  break;
    9968  default:
    9969  VMA_ASSERT(0);
    9970  }
    9971 }
    9972 
    9973 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9974 {
    9975  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9976 
    9977  // List is empty.
    9978  Node* const frontNode = m_FreeList[level].front;
    9979  if(frontNode == VMA_NULL)
    9980  {
    9981  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9982  node->free.prev = node->free.next = VMA_NULL;
    9983  m_FreeList[level].front = m_FreeList[level].back = node;
    9984  }
    9985  else
    9986  {
    9987  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9988  node->free.prev = VMA_NULL;
    9989  node->free.next = frontNode;
    9990  frontNode->free.prev = node;
    9991  m_FreeList[level].front = node;
    9992  }
    9993 }
    9994 
    9995 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9996 {
    9997  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9998 
    9999  // It is at the front.
    10000  if(node->free.prev == VMA_NULL)
    10001  {
    10002  VMA_ASSERT(m_FreeList[level].front == node);
    10003  m_FreeList[level].front = node->free.next;
    10004  }
    10005  else
    10006  {
    10007  Node* const prevFreeNode = node->free.prev;
    10008  VMA_ASSERT(prevFreeNode->free.next == node);
    10009  prevFreeNode->free.next = node->free.next;
    10010  }
    10011 
    10012  // It is at the back.
    10013  if(node->free.next == VMA_NULL)
    10014  {
    10015  VMA_ASSERT(m_FreeList[level].back == node);
    10016  m_FreeList[level].back = node->free.prev;
    10017  }
    10018  else
    10019  {
    10020  Node* const nextFreeNode = node->free.next;
    10021  VMA_ASSERT(nextFreeNode->free.prev == node);
    10022  nextFreeNode->free.prev = node->free.prev;
    10023  }
    10024 }
    10025 
    10026 #if VMA_STATS_STRING_ENABLED
    10027 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10028 {
    10029  switch(node->type)
    10030  {
    10031  case Node::TYPE_FREE:
    10032  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10033  break;
    10034  case Node::TYPE_ALLOCATION:
    10035  {
    10036  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10037  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10038  if(allocSize < levelNodeSize)
    10039  {
    10040  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10041  }
    10042  }
    10043  break;
    10044  case Node::TYPE_SPLIT:
    10045  {
    10046  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10047  const Node* const leftChild = node->split.leftChild;
    10048  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10049  const Node* const rightChild = leftChild->buddy;
    10050  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10051  }
    10052  break;
    10053  default:
    10054  VMA_ASSERT(0);
    10055  }
    10056 }
    10057 #endif // #if VMA_STATS_STRING_ENABLED
    10058 
    10059 
    10061 // class VmaDeviceMemoryBlock
    10062 
// Constructs an empty, uninitialized block; Init() must be called before use.
// NOTE(review): hAllocator is currently unused here - kept for interface symmetry.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    10072 
    10073 void VmaDeviceMemoryBlock::Init(
    10074  VmaAllocator hAllocator,
    10075  uint32_t newMemoryTypeIndex,
    10076  VkDeviceMemory newMemory,
    10077  VkDeviceSize newSize,
    10078  uint32_t id,
    10079  uint32_t algorithm)
    10080 {
    10081  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10082 
    10083  m_MemoryTypeIndex = newMemoryTypeIndex;
    10084  m_Id = id;
    10085  m_hMemory = newMemory;
    10086 
    10087  switch(algorithm)
    10088  {
    10090  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10091  break;
    10093  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10094  break;
    10095  default:
    10096  VMA_ASSERT(0);
    10097  // Fall-through.
    10098  case 0:
    10099  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10100  }
    10101  m_pMetadata->Init(newSize);
    10102 }
    10103 
// Releases the underlying VkDeviceMemory and the metadata object.
// Must only be called once all suballocations in this block have been freed.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    10117 
    10118 bool VmaDeviceMemoryBlock::Validate() const
    10119 {
    10120  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10121  (m_pMetadata->GetSize() != 0));
    10122 
    10123  return m_pMetadata->Validate();
    10124 }
    10125 
    10126 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10127 {
    10128  void* pData = nullptr;
    10129  VkResult res = Map(hAllocator, 1, &pData);
    10130  if(res != VK_SUCCESS)
    10131  {
    10132  return res;
    10133  }
    10134 
    10135  res = m_pMetadata->CheckCorruption(pData);
    10136 
    10137  Unmap(hAllocator, 1);
    10138 
    10139  return res;
    10140 }
    10141 
// Reference-counted mapping of the whole block: vkMapMemory is called only on
// the transition from unmapped to mapped; further calls just bump the count
// and hand out the cached pointer. `count` references are added on success.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Protects m_MapCount/m_pMappedData and serializes vkMapMemory on this VkDeviceMemory.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: only add references and return the cached pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the entire memory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            // Count is only recorded on success so a failed map leaves state untouched.
            m_MapCount = count;
        }
        return result;
    }
}
    10180 
    10181 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10182 {
    10183  if(count == 0)
    10184  {
    10185  return;
    10186  }
    10187 
    10188  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10189  if(m_MapCount >= count)
    10190  {
    10191  m_MapCount -= count;
    10192  if(m_MapCount == 0)
    10193  {
    10194  m_pMappedData = VMA_NULL;
    10195  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10196  }
    10197  }
    10198  else
    10199  {
    10200  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10201  }
    10202 }
    10203 
    10204 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10205 {
    10206  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10207  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10208 
    10209  void* pData;
    10210  VkResult res = Map(hAllocator, 1, &pData);
    10211  if(res != VK_SUCCESS)
    10212  {
    10213  return res;
    10214  }
    10215 
    10216  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10217  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10218 
    10219  Unmap(hAllocator, 1);
    10220 
    10221  return VK_SUCCESS;
    10222 }
    10223 
    10224 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10225 {
    10226  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10227  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10228 
    10229  void* pData;
    10230  VkResult res = Map(hAllocator, 1, &pData);
    10231  if(res != VK_SUCCESS)
    10232  {
    10233  return res;
    10234  }
    10235 
    10236  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10237  {
    10238  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10239  }
    10240  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10241  {
    10242  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10243  }
    10244 
    10245  Unmap(hAllocator, 1);
    10246 
    10247  return VK_SUCCESS;
    10248 }
    10249 
// Binds hBuffer to this block's VkDeviceMemory at the allocation's offset.
// The allocation must be a block-type allocation living in this block.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}
    10265 
// Binds hImage to this block's VkDeviceMemory at the allocation's offset.
// Mirrors BindBufferMemory for images.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkImage hImage)
{
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindImageMemory(
        hAllocator->m_hDevice,
        hImage,
        m_hMemory,
        hAllocation->GetOffset());
}
    10281 
    10282 static void InitStatInfo(VmaStatInfo& outInfo)
    10283 {
    10284  memset(&outInfo, 0, sizeof(outInfo));
    10285  outInfo.allocationSizeMin = UINT64_MAX;
    10286  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10287 }
    10288 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counters and byte totals accumulate; extremes combine with min/max.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
    10302 
    10303 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10304 {
    10305  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10306  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10307  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10308  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10309 }
    10310 
// Wraps a dedicated VmaBlockVector configured from createInfo. Falls back to
// `preferredBlockSize` when the pool does not request an explicit block size.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Granularity 1 effectively disables buffer/image granularity handling.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10329 
// Empty: m_BlockVector releases its blocks in its own destructor.
VmaPool_T::~VmaPool_T()
{
}
    10333 
    10334 #if VMA_STATS_STRING_ENABLED
    10335 
    10336 #endif // #if VMA_STATS_STRING_ENABLED
    10337 
// Stores the configuration for a vector of memory blocks of one memory type.
// No blocks are created here; see CreateMinBlocks() and Allocate().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    // Block pointers are kept in a vector using the allocator's own callbacks.
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10365 
// Destroys all remaining blocks. A defragmentator must not be active anymore.
VmaBlockVector::~VmaBlockVector()
{
    VMA_ASSERT(m_pDefragmentator == VMA_NULL);

    // Destroy blocks in reverse order of creation.
    for(size_t i = m_Blocks.size(); i--; )
    {
        m_Blocks[i]->Destroy(m_hAllocator);
        vma_delete(m_hAllocator, m_Blocks[i]);
    }
}
    10376 
    10377 VkResult VmaBlockVector::CreateMinBlocks()
    10378 {
    10379  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10380  {
    10381  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10382  if(res != VK_SUCCESS)
    10383  {
    10384  return res;
    10385  }
    10386  }
    10387  return VK_SUCCESS;
    10388 }
    10389 
    10390 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10391 {
    10392  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10393 
    10394  const size_t blockCount = m_Blocks.size();
    10395 
    10396  pStats->size = 0;
    10397  pStats->unusedSize = 0;
    10398  pStats->allocationCount = 0;
    10399  pStats->unusedRangeCount = 0;
    10400  pStats->unusedRangeSizeMax = 0;
    10401  pStats->blockCount = blockCount;
    10402 
    10403  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10404  {
    10405  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10406  VMA_ASSERT(pBlock);
    10407  VMA_HEAVY_ASSERT(pBlock->Validate());
    10408  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10409  }
    10410 }
    10411 
    10412 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10413 {
    10414  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10415  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10416  (VMA_DEBUG_MARGIN > 0) &&
    10417  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10418 }
    10419 
// Maximum number of retries in VmaBlockVector::Allocate when making other
// allocations lost, before giving up with VK_ERROR_TOO_MANY_OBJECTS.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10421 
    10422 VkResult VmaBlockVector::Allocate(
    10423  VmaPool hCurrentPool,
    10424  uint32_t currentFrameIndex,
    10425  VkDeviceSize size,
    10426  VkDeviceSize alignment,
    10427  const VmaAllocationCreateInfo& createInfo,
    10428  VmaSuballocationType suballocType,
    10429  VmaAllocation* pAllocation)
    10430 {
    10431  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10432  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10433  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10434  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10435  const bool canCreateNewBlock =
    10436  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10437  (m_Blocks.size() < m_MaxBlockCount);
    10438  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10439 
    10440  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10441  // Which in turn is available only when maxBlockCount = 1.
    10442  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10443  {
    10444  canMakeOtherLost = false;
    10445  }
    10446 
    10447  // Upper address can only be used with linear allocator and within single memory block.
    10448  if(isUpperAddress &&
    10449  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10450  {
    10451  return VK_ERROR_FEATURE_NOT_PRESENT;
    10452  }
    10453 
    10454  // Validate strategy.
    10455  switch(strategy)
    10456  {
    10457  case 0:
    10459  break;
    10463  break;
    10464  default:
    10465  return VK_ERROR_FEATURE_NOT_PRESENT;
    10466  }
    10467 
    10468  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10469  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10470  {
    10471  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10472  }
    10473 
    10474  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10475 
    10476  /*
    10477  Under certain condition, this whole section can be skipped for optimization, so
    10478  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10479  e.g. for custom pools with linear algorithm.
    10480  */
    10481  if(!canMakeOtherLost || canCreateNewBlock)
    10482  {
    10483  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10484  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10486 
    10487  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10488  {
    10489  // Use only last block.
    10490  if(!m_Blocks.empty())
    10491  {
    10492  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10493  VMA_ASSERT(pCurrBlock);
    10494  VkResult res = AllocateFromBlock(
    10495  pCurrBlock,
    10496  hCurrentPool,
    10497  currentFrameIndex,
    10498  size,
    10499  alignment,
    10500  allocFlagsCopy,
    10501  createInfo.pUserData,
    10502  suballocType,
    10503  strategy,
    10504  pAllocation);
    10505  if(res == VK_SUCCESS)
    10506  {
    10507  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10508  return VK_SUCCESS;
    10509  }
    10510  }
    10511  }
    10512  else
    10513  {
    10515  {
    10516  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10517  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10518  {
    10519  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10520  VMA_ASSERT(pCurrBlock);
    10521  VkResult res = AllocateFromBlock(
    10522  pCurrBlock,
    10523  hCurrentPool,
    10524  currentFrameIndex,
    10525  size,
    10526  alignment,
    10527  allocFlagsCopy,
    10528  createInfo.pUserData,
    10529  suballocType,
    10530  strategy,
    10531  pAllocation);
    10532  if(res == VK_SUCCESS)
    10533  {
    10534  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10535  return VK_SUCCESS;
    10536  }
    10537  }
    10538  }
    10539  else // WORST_FIT, FIRST_FIT
    10540  {
    10541  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10542  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10543  {
    10544  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10545  VMA_ASSERT(pCurrBlock);
    10546  VkResult res = AllocateFromBlock(
    10547  pCurrBlock,
    10548  hCurrentPool,
    10549  currentFrameIndex,
    10550  size,
    10551  alignment,
    10552  allocFlagsCopy,
    10553  createInfo.pUserData,
    10554  suballocType,
    10555  strategy,
    10556  pAllocation);
    10557  if(res == VK_SUCCESS)
    10558  {
    10559  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10560  return VK_SUCCESS;
    10561  }
    10562  }
    10563  }
    10564  }
    10565 
    10566  // 2. Try to create new block.
    10567  if(canCreateNewBlock)
    10568  {
    10569  // Calculate optimal size for new block.
    10570  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10571  uint32_t newBlockSizeShift = 0;
    10572  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10573 
    10574  if(!m_ExplicitBlockSize)
    10575  {
    10576  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10577  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10578  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10579  {
    10580  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10581  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10582  {
    10583  newBlockSize = smallerNewBlockSize;
    10584  ++newBlockSizeShift;
    10585  }
    10586  else
    10587  {
    10588  break;
    10589  }
    10590  }
    10591  }
    10592 
    10593  size_t newBlockIndex = 0;
    10594  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10595  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10596  if(!m_ExplicitBlockSize)
    10597  {
    10598  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10599  {
    10600  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10601  if(smallerNewBlockSize >= size)
    10602  {
    10603  newBlockSize = smallerNewBlockSize;
    10604  ++newBlockSizeShift;
    10605  res = CreateBlock(newBlockSize, &newBlockIndex);
    10606  }
    10607  else
    10608  {
    10609  break;
    10610  }
    10611  }
    10612  }
    10613 
    10614  if(res == VK_SUCCESS)
    10615  {
    10616  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10617  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10618 
    10619  res = AllocateFromBlock(
    10620  pBlock,
    10621  hCurrentPool,
    10622  currentFrameIndex,
    10623  size,
    10624  alignment,
    10625  allocFlagsCopy,
    10626  createInfo.pUserData,
    10627  suballocType,
    10628  strategy,
    10629  pAllocation);
    10630  if(res == VK_SUCCESS)
    10631  {
    10632  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10633  return VK_SUCCESS;
    10634  }
    10635  else
    10636  {
    10637  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10638  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10639  }
    10640  }
    10641  }
    10642  }
    10643 
    10644  // 3. Try to allocate from existing blocks with making other allocations lost.
    10645  if(canMakeOtherLost)
    10646  {
    10647  uint32_t tryIndex = 0;
    10648  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10649  {
    10650  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10651  VmaAllocationRequest bestRequest = {};
    10652  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10653 
    10654  // 1. Search existing allocations.
    10656  {
    10657  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10658  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10659  {
    10660  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10661  VMA_ASSERT(pCurrBlock);
    10662  VmaAllocationRequest currRequest = {};
    10663  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10664  currentFrameIndex,
    10665  m_FrameInUseCount,
    10666  m_BufferImageGranularity,
    10667  size,
    10668  alignment,
    10669  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10670  suballocType,
    10671  canMakeOtherLost,
    10672  strategy,
    10673  &currRequest))
    10674  {
    10675  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10676  if(pBestRequestBlock == VMA_NULL ||
    10677  currRequestCost < bestRequestCost)
    10678  {
    10679  pBestRequestBlock = pCurrBlock;
    10680  bestRequest = currRequest;
    10681  bestRequestCost = currRequestCost;
    10682 
    10683  if(bestRequestCost == 0)
    10684  {
    10685  break;
    10686  }
    10687  }
    10688  }
    10689  }
    10690  }
    10691  else // WORST_FIT, FIRST_FIT
    10692  {
    10693  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10694  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10695  {
    10696  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10697  VMA_ASSERT(pCurrBlock);
    10698  VmaAllocationRequest currRequest = {};
    10699  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10700  currentFrameIndex,
    10701  m_FrameInUseCount,
    10702  m_BufferImageGranularity,
    10703  size,
    10704  alignment,
    10705  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10706  suballocType,
    10707  canMakeOtherLost,
    10708  strategy,
    10709  &currRequest))
    10710  {
    10711  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10712  if(pBestRequestBlock == VMA_NULL ||
    10713  currRequestCost < bestRequestCost ||
    10715  {
    10716  pBestRequestBlock = pCurrBlock;
    10717  bestRequest = currRequest;
    10718  bestRequestCost = currRequestCost;
    10719 
    10720  if(bestRequestCost == 0 ||
    10722  {
    10723  break;
    10724  }
    10725  }
    10726  }
    10727  }
    10728  }
    10729 
    10730  if(pBestRequestBlock != VMA_NULL)
    10731  {
    10732  if(mapped)
    10733  {
    10734  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10735  if(res != VK_SUCCESS)
    10736  {
    10737  return res;
    10738  }
    10739  }
    10740 
    10741  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10742  currentFrameIndex,
    10743  m_FrameInUseCount,
    10744  &bestRequest))
    10745  {
    10746  // We no longer have an empty Allocation.
    10747  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10748  {
    10749  m_HasEmptyBlock = false;
    10750  }
    10751  // Allocate from this pBlock.
    10752  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10753  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10754  (*pAllocation)->InitBlockAllocation(
    10755  hCurrentPool,
    10756  pBestRequestBlock,
    10757  bestRequest.offset,
    10758  alignment,
    10759  size,
    10760  suballocType,
    10761  mapped,
    10762  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10763  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10764  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10765  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10766  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10767  {
    10768  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10769  }
    10770  if(IsCorruptionDetectionEnabled())
    10771  {
    10772  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10773  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10774  }
    10775  return VK_SUCCESS;
    10776  }
    10777  // else: Some allocations must have been touched while we are here. Next try.
    10778  }
    10779  else
    10780  {
    10781  // Could not find place in any of the blocks - break outer loop.
    10782  break;
    10783  }
    10784  }
    10785  /* Maximum number of tries exceeded - a very unlike event when many other
    10786  threads are simultaneously touching allocations making it impossible to make
    10787  lost at the same time as we try to allocate. */
    10788  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10789  {
    10790  return VK_ERROR_TOO_MANY_OBJECTS;
    10791  }
    10792  }
    10793 
    10794  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10795 }
    10796 
    10797 void VmaBlockVector::Free(
    10798  VmaAllocation hAllocation)
    10799 {
    10800  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10801 
    10802  // Scope for lock.
    10803  {
    10804  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10805 
    10806  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10807 
    10808  if(IsCorruptionDetectionEnabled())
    10809  {
    10810  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10811  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10812  }
    10813 
    10814  if(hAllocation->IsPersistentMap())
    10815  {
    10816  pBlock->Unmap(m_hAllocator, 1);
    10817  }
    10818 
    10819  pBlock->m_pMetadata->Free(hAllocation);
    10820  VMA_HEAVY_ASSERT(pBlock->Validate());
    10821 
    10822  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10823 
    10824  // pBlock became empty after this deallocation.
    10825  if(pBlock->m_pMetadata->IsEmpty())
    10826  {
    10827  // Already has empty Allocation. We don't want to have two, so delete this one.
    10828  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10829  {
    10830  pBlockToDelete = pBlock;
    10831  Remove(pBlock);
    10832  }
    10833  // We now have first empty block.
    10834  else
    10835  {
    10836  m_HasEmptyBlock = true;
    10837  }
    10838  }
    10839  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10840  // (This is optional, heuristics.)
    10841  else if(m_HasEmptyBlock)
    10842  {
    10843  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10844  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10845  {
    10846  pBlockToDelete = pLastBlock;
    10847  m_Blocks.pop_back();
    10848  m_HasEmptyBlock = false;
    10849  }
    10850  }
    10851 
    10852  IncrementallySortBlocks();
    10853  }
    10854 
    10855  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10856  // lock, for performance reason.
    10857  if(pBlockToDelete != VMA_NULL)
    10858  {
    10859  VMA_DEBUG_LOG(" Deleted empty allocation");
    10860  pBlockToDelete->Destroy(m_hAllocator);
    10861  vma_delete(m_hAllocator, pBlockToDelete);
    10862  }
    10863 }
    10864 
    10865 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10866 {
    10867  VkDeviceSize result = 0;
    10868  for(size_t i = m_Blocks.size(); i--; )
    10869  {
    10870  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10871  if(result >= m_PreferredBlockSize)
    10872  {
    10873  break;
    10874  }
    10875  }
    10876  return result;
    10877 }
    10878 
    10879 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10880 {
    10881  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10882  {
    10883  if(m_Blocks[blockIndex] == pBlock)
    10884  {
    10885  VmaVectorRemove(m_Blocks, blockIndex);
    10886  return;
    10887  }
    10888  }
    10889  VMA_ASSERT(0);
    10890 }
    10891 
    10892 void VmaBlockVector::IncrementallySortBlocks()
    10893 {
    10894  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10895  {
    10896  // Bubble sort only until first swap.
    10897  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10898  {
    10899  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10900  {
    10901  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10902  return;
    10903  }
    10904  }
    10905  }
    10906 }
    10907 
    10908 VkResult VmaBlockVector::AllocateFromBlock(
    10909  VmaDeviceMemoryBlock* pBlock,
    10910  VmaPool hCurrentPool,
    10911  uint32_t currentFrameIndex,
    10912  VkDeviceSize size,
    10913  VkDeviceSize alignment,
    10914  VmaAllocationCreateFlags allocFlags,
    10915  void* pUserData,
    10916  VmaSuballocationType suballocType,
    10917  uint32_t strategy,
    10918  VmaAllocation* pAllocation)
    10919 {
    10920  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    10921  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10922  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10923  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10924 
    10925  VmaAllocationRequest currRequest = {};
    10926  if(pBlock->m_pMetadata->CreateAllocationRequest(
    10927  currentFrameIndex,
    10928  m_FrameInUseCount,
    10929  m_BufferImageGranularity,
    10930  size,
    10931  alignment,
    10932  isUpperAddress,
    10933  suballocType,
    10934  false, // canMakeOtherLost
    10935  strategy,
    10936  &currRequest))
    10937  {
    10938  // Allocate from pCurrBlock.
    10939  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    10940 
    10941  if(mapped)
    10942  {
    10943  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    10944  if(res != VK_SUCCESS)
    10945  {
    10946  return res;
    10947  }
    10948  }
    10949 
    10950  // We no longer have an empty Allocation.
    10951  if(pBlock->m_pMetadata->IsEmpty())
    10952  {
    10953  m_HasEmptyBlock = false;
    10954  }
    10955 
    10956  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10957  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
    10958  (*pAllocation)->InitBlockAllocation(
    10959  hCurrentPool,
    10960  pBlock,
    10961  currRequest.offset,
    10962  alignment,
    10963  size,
    10964  suballocType,
    10965  mapped,
    10966  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10967  VMA_HEAVY_ASSERT(pBlock->Validate());
    10968  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    10969  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10970  {
    10971  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10972  }
    10973  if(IsCorruptionDetectionEnabled())
    10974  {
    10975  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    10976  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10977  }
    10978  return VK_SUCCESS;
    10979  }
    10980  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10981 }
    10982 
    10983 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10984 {
    10985  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10986  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10987  allocInfo.allocationSize = blockSize;
    10988  VkDeviceMemory mem = VK_NULL_HANDLE;
    10989  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10990  if(res < 0)
    10991  {
    10992  return res;
    10993  }
    10994 
    10995  // New VkDeviceMemory successfully created.
    10996 
    10997  // Create new Allocation for it.
    10998  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10999  pBlock->Init(
    11000  m_hAllocator,
    11001  m_MemoryTypeIndex,
    11002  mem,
    11003  allocInfo.allocationSize,
    11004  m_NextBlockId++,
    11005  m_Algorithm);
    11006 
    11007  m_Blocks.push_back(pBlock);
    11008  if(pNewBlockIndex != VMA_NULL)
    11009  {
    11010  *pNewBlockIndex = m_Blocks.size() - 1;
    11011  }
    11012 
    11013  return VK_SUCCESS;
    11014 }
    11015 
    11016 #if VMA_STATS_STRING_ENABLED
    11017 
// Writes this block vector's state into the JSON statistics string.
// Custom pools print their full configuration (memory type, block size and
// count limits, frame-in-use count, algorithm); default pools print only the
// preferred block size. Then every block's detailed map follows, keyed by
// block id. Output order is fixed - tools parse this format.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // "BlockCount" is a nested object: optional Min/Max limits plus current count.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        // Only report a non-default algorithm.
        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // Per-block detailed maps, keyed by the block's numeric id.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    11080 
    11081 #endif // #if VMA_STATS_STRING_ENABLED
    11082 
    11083 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11084  VmaAllocator hAllocator,
    11085  uint32_t currentFrameIndex)
    11086 {
    11087  if(m_pDefragmentator == VMA_NULL)
    11088  {
    11089  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11090  hAllocator,
    11091  this,
    11092  currentFrameIndex);
    11093  }
    11094 
    11095  return m_pDefragmentator;
    11096 }
    11097 
// Runs the defragmentator (if one was created via EnsureDefragmentator) over
// this block vector, accumulates statistics, and frees blocks that became
// empty. maxBytesToMove / maxAllocationsToMove are in-out budgets: the
// amounts actually moved are subtracted so the caller can spread one budget
// across multiple block vectors.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    // No defragmentator was ever requested - nothing to do.
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        // Charge what was moved against the caller's remaining budget.
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    m_HasEmptyBlock = false;
    // Iterate backwards so VmaVectorRemove does not shift indices we have yet to visit.
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep at least m_MinBlockCount blocks - remember we kept an empty one.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11154 
    11155 void VmaBlockVector::DestroyDefragmentator()
    11156 {
    11157  if(m_pDefragmentator != VMA_NULL)
    11158  {
    11159  vma_delete(m_hAllocator, m_pDefragmentator);
    11160  m_pDefragmentator = VMA_NULL;
    11161  }
    11162 }
    11163 
    11164 void VmaBlockVector::MakePoolAllocationsLost(
    11165  uint32_t currentFrameIndex,
    11166  size_t* pLostAllocationCount)
    11167 {
    11168  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11169  size_t lostAllocationCount = 0;
    11170  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11171  {
    11172  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11173  VMA_ASSERT(pBlock);
    11174  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11175  }
    11176  if(pLostAllocationCount != VMA_NULL)
    11177  {
    11178  *pLostAllocationCount = lostAllocationCount;
    11179  }
    11180 }
    11181 
    11182 VkResult VmaBlockVector::CheckCorruption()
    11183 {
    11184  if(!IsCorruptionDetectionEnabled())
    11185  {
    11186  return VK_ERROR_FEATURE_NOT_PRESENT;
    11187  }
    11188 
    11189  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11190  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11191  {
    11192  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11193  VMA_ASSERT(pBlock);
    11194  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11195  if(res != VK_SUCCESS)
    11196  {
    11197  return res;
    11198  }
    11199  }
    11200  return VK_SUCCESS;
    11201 }
    11202 
    11203 void VmaBlockVector::AddStats(VmaStats* pStats)
    11204 {
    11205  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11206  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11207 
    11208  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11209 
    11210  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11211  {
    11212  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11213  VMA_ASSERT(pBlock);
    11214  VMA_HEAVY_ASSERT(pBlock->Validate());
    11215  VmaStatInfo allocationStatInfo;
    11216  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11217  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11218  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11219  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11220  }
    11221 }
    11222 
    11224 // VmaDefragmentator members definition
    11225 
// Constructs a defragmentator bound to a single block vector.
// Allocation/block bookkeeping vectors use the allocator's custom callbacks.
// Only the default algorithm is supported (asserted below) - linear/buddy
// block vectors must not create a defragmentator.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11240 
    11241 VmaDefragmentator::~VmaDefragmentator()
    11242 {
    11243  for(size_t i = m_Blocks.size(); i--; )
    11244  {
    11245  vma_delete(m_hAllocator, m_Blocks[i]);
    11246  }
    11247 }
    11248 
    11249 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11250 {
    11251  AllocationInfo allocInfo;
    11252  allocInfo.m_hAllocation = hAlloc;
    11253  allocInfo.m_pChanged = pChanged;
    11254  m_Allocations.push_back(allocInfo);
    11255 }
    11256 
    11257 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11258 {
    11259  // It has already been mapped for defragmentation.
    11260  if(m_pMappedDataForDefragmentation)
    11261  {
    11262  *ppMappedData = m_pMappedDataForDefragmentation;
    11263  return VK_SUCCESS;
    11264  }
    11265 
    11266  // It is originally mapped.
    11267  if(m_pBlock->GetMappedData())
    11268  {
    11269  *ppMappedData = m_pBlock->GetMappedData();
    11270  return VK_SUCCESS;
    11271  }
    11272 
    11273  // Map on first usage.
    11274  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11275  *ppMappedData = m_pMappedDataForDefragmentation;
    11276  return res;
    11277 }
    11278 
    11279 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11280 {
    11281  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11282  {
    11283  m_pBlock->Unmap(hAllocator, 1);
    11284  }
    11285 }
    11286 
    11287 VkResult VmaDefragmentator::DefragmentRound(
    11288  VkDeviceSize maxBytesToMove,
    11289  uint32_t maxAllocationsToMove)
    11290 {
    11291  if(m_Blocks.empty())
    11292  {
    11293  return VK_SUCCESS;
    11294  }
    11295 
    11296  size_t srcBlockIndex = m_Blocks.size() - 1;
    11297  size_t srcAllocIndex = SIZE_MAX;
    11298  for(;;)
    11299  {
    11300  // 1. Find next allocation to move.
    11301  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    11302  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    11303  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    11304  {
    11305  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    11306  {
    11307  // Finished: no more allocations to process.
    11308  if(srcBlockIndex == 0)
    11309  {
    11310  return VK_SUCCESS;
    11311  }
    11312  else
    11313  {
    11314  --srcBlockIndex;
    11315  srcAllocIndex = SIZE_MAX;
    11316  }
    11317  }
    11318  else
    11319  {
    11320  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    11321  }
    11322  }
    11323 
    11324  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    11325  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    11326 
    11327  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    11328  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    11329  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    11330  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    11331 
    11332  // 2. Try to find new place for this allocation in preceding or current block.
    11333  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    11334  {
    11335  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    11336  VmaAllocationRequest dstAllocRequest;
    11337  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    11338  m_CurrentFrameIndex,
    11339  m_pBlockVector->GetFrameInUseCount(),
    11340  m_pBlockVector->GetBufferImageGranularity(),
    11341  size,
    11342  alignment,
    11343  false, // upperAddress
    11344  suballocType,
    11345  false, // canMakeOtherLost
    11347  &dstAllocRequest) &&
    11348  MoveMakesSense(
    11349  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    11350  {
    11351  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    11352 
    11353  // Reached limit on number of allocations or bytes to move.
    11354  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    11355  (m_BytesMoved + size > maxBytesToMove))
    11356  {
    11357  return VK_INCOMPLETE;
    11358  }
    11359 
    11360  void* pDstMappedData = VMA_NULL;
    11361  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    11362  if(res != VK_SUCCESS)
    11363  {
    11364  return res;
    11365  }
    11366 
    11367  void* pSrcMappedData = VMA_NULL;
    11368  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    11369  if(res != VK_SUCCESS)
    11370  {
    11371  return res;
    11372  }
    11373 
    11374  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    11375  memcpy(
    11376  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    11377  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    11378  static_cast<size_t>(size));
    11379 
    11380  if(VMA_DEBUG_MARGIN > 0)
    11381  {
    11382  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
    11383  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
    11384  }
    11385 
    11386  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    11387  dstAllocRequest,
    11388  suballocType,
    11389  size,
    11390  false, // upperAddress
    11391  allocInfo.m_hAllocation);
    11392  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    11393 
    11394  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    11395 
    11396  if(allocInfo.m_pChanged != VMA_NULL)
    11397  {
    11398  *allocInfo.m_pChanged = VK_TRUE;
    11399  }
    11400 
    11401  ++m_AllocationsMoved;
    11402  m_BytesMoved += size;
    11403 
    11404  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    11405 
    11406  break;
    11407  }
    11408  }
    11409 
    11410  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    11411 
    11412  if(srcAllocIndex > 0)
    11413  {
    11414  --srcAllocIndex;
    11415  }
    11416  else
    11417  {
    11418  if(srcBlockIndex > 0)
    11419  {
    11420  --srcBlockIndex;
    11421  srcAllocIndex = SIZE_MAX;
    11422  }
    11423  else
    11424  {
    11425  return VK_SUCCESS;
    11426  }
    11427  }
    11428  }
    11429 }
    11430 
// Top-level defragmentation entry point for the allocations registered via
// AddAllocation(). Builds per-block bookkeeping, distributes allocations to
// their blocks, sorts, runs up to two DefragmentRound() passes within the
// given budgets, then unmaps any blocks mapped for the copy.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value - enables binary search below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    // NOTE(review): the loop variable is named blockIndex but actually indexes
    // m_Allocations (bounded by allocCount) - harmless, just a misleading name.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of this vector's blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block flags and order allocations largest-first.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11498 
    11499 bool VmaDefragmentator::MoveMakesSense(
    11500  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11501  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11502 {
    11503  if(dstBlockIndex < srcBlockIndex)
    11504  {
    11505  return true;
    11506  }
    11507  if(dstBlockIndex > srcBlockIndex)
    11508  {
    11509  return false;
    11510  }
    11511  if(dstOffset < srcOffset)
    11512  {
    11513  return true;
    11514  }
    11515  return false;
    11516 }
    11517 
    11519 // VmaRecorder
    11520 
    11521 #if VMA_RECORDING_ENABLED
    11522 
// Constructs an inactive recorder: no file is open until Init() is called.
// m_Freq / m_StartCounter start at INT64_MAX as "not yet measured" markers;
// Init() overwrites them from the performance counter.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11531 
// Opens the recording file and writes the CSV header.
// Windows-only implementation (QueryPerformanceCounter/Frequency, fopen_s),
// consistent with this whole VMA_RECORDING_ENABLED section.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture timer frequency and session start time; later calls compute
    // timestamps relative to these.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: magic line identifying the file, then format version (major,minor).
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,4");

    return VK_SUCCESS;
}
    11553 
    11554 VmaRecorder::~VmaRecorder()
    11555 {
    11556  if(m_File != VMA_NULL)
    11557  {
    11558  fclose(m_File);
    11559  }
    11560 }
    11561 
    11562 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11563 {
    11564  CallParams callParams;
    11565  GetBasicParams(callParams);
    11566 
    11567  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11568  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11569  Flush();
    11570 }
    11571 
    11572 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11573 {
    11574  CallParams callParams;
    11575  GetBasicParams(callParams);
    11576 
    11577  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11578  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11579  Flush();
    11580 }
    11581 
    11582 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    11583 {
    11584  CallParams callParams;
    11585  GetBasicParams(callParams);
    11586 
    11587  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11588  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    11589  createInfo.memoryTypeIndex,
    11590  createInfo.flags,
    11591  createInfo.blockSize,
    11592  (uint64_t)createInfo.minBlockCount,
    11593  (uint64_t)createInfo.maxBlockCount,
    11594  createInfo.frameInUseCount,
    11595  pool);
    11596  Flush();
    11597 }
    11598 
    11599 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11600 {
    11601  CallParams callParams;
    11602  GetBasicParams(callParams);
    11603 
    11604  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11605  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11606  pool);
    11607  Flush();
    11608 }
    11609 
    11610 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    11611  const VkMemoryRequirements& vkMemReq,
    11612  const VmaAllocationCreateInfo& createInfo,
    11613  VmaAllocation allocation)
    11614 {
    11615  CallParams callParams;
    11616  GetBasicParams(callParams);
    11617 
    11618  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11619  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11620  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11621  vkMemReq.size,
    11622  vkMemReq.alignment,
    11623  vkMemReq.memoryTypeBits,
    11624  createInfo.flags,
    11625  createInfo.usage,
    11626  createInfo.requiredFlags,
    11627  createInfo.preferredFlags,
    11628  createInfo.memoryTypeBits,
    11629  createInfo.pool,
    11630  allocation,
    11631  userDataStr.GetString());
    11632  Flush();
    11633 }
    11634 
    11635 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    11636  const VkMemoryRequirements& vkMemReq,
    11637  bool requiresDedicatedAllocation,
    11638  bool prefersDedicatedAllocation,
    11639  const VmaAllocationCreateInfo& createInfo,
    11640  VmaAllocation allocation)
    11641 {
    11642  CallParams callParams;
    11643  GetBasicParams(callParams);
    11644 
    11645  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11646  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11647  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11648  vkMemReq.size,
    11649  vkMemReq.alignment,
    11650  vkMemReq.memoryTypeBits,
    11651  requiresDedicatedAllocation ? 1 : 0,
    11652  prefersDedicatedAllocation ? 1 : 0,
    11653  createInfo.flags,
    11654  createInfo.usage,
    11655  createInfo.requiredFlags,
    11656  createInfo.preferredFlags,
    11657  createInfo.memoryTypeBits,
    11658  createInfo.pool,
    11659  allocation,
    11660  userDataStr.GetString());
    11661  Flush();
    11662 }
    11663 
    11664 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    11665  const VkMemoryRequirements& vkMemReq,
    11666  bool requiresDedicatedAllocation,
    11667  bool prefersDedicatedAllocation,
    11668  const VmaAllocationCreateInfo& createInfo,
    11669  VmaAllocation allocation)
    11670 {
    11671  CallParams callParams;
    11672  GetBasicParams(callParams);
    11673 
    11674  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11675  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    11676  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11677  vkMemReq.size,
    11678  vkMemReq.alignment,
    11679  vkMemReq.memoryTypeBits,
    11680  requiresDedicatedAllocation ? 1 : 0,
    11681  prefersDedicatedAllocation ? 1 : 0,
    11682  createInfo.flags,
    11683  createInfo.usage,
    11684  createInfo.requiredFlags,
    11685  createInfo.preferredFlags,
    11686  createInfo.memoryTypeBits,
    11687  createInfo.pool,
    11688  allocation,
    11689  userDataStr.GetString());
    11690  Flush();
    11691 }
    11692 
    11693 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11694  VmaAllocation allocation)
    11695 {
    11696  CallParams callParams;
    11697  GetBasicParams(callParams);
    11698 
    11699  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11700  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11701  allocation);
    11702  Flush();
    11703 }
    11704 
    11705 void VmaRecorder::RecordResizeAllocation(
    11706  uint32_t frameIndex,
    11707  VmaAllocation allocation,
    11708  VkDeviceSize newSize)
    11709 {
    11710  CallParams callParams;
    11711  GetBasicParams(callParams);
    11712 
    11713  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11714  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11715  allocation, newSize);
    11716  Flush();
    11717 }
    11718 
    11719 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11720  VmaAllocation allocation,
    11721  const void* pUserData)
    11722 {
    11723  CallParams callParams;
    11724  GetBasicParams(callParams);
    11725 
    11726  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11727  UserDataString userDataStr(
    11728  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11729  pUserData);
    11730  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11731  allocation,
    11732  userDataStr.GetString());
    11733  Flush();
    11734 }
    11735 
    11736 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11737  VmaAllocation allocation)
    11738 {
    11739  CallParams callParams;
    11740  GetBasicParams(callParams);
    11741 
    11742  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11743  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11744  allocation);
    11745  Flush();
    11746 }
    11747 
    11748 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11749  VmaAllocation allocation)
    11750 {
    11751  CallParams callParams;
    11752  GetBasicParams(callParams);
    11753 
    11754  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11755  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11756  allocation);
    11757  Flush();
    11758 }
    11759 
    11760 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11761  VmaAllocation allocation)
    11762 {
    11763  CallParams callParams;
    11764  GetBasicParams(callParams);
    11765 
    11766  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11767  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11768  allocation);
    11769  Flush();
    11770 }
    11771 
    11772 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11773  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11774 {
    11775  CallParams callParams;
    11776  GetBasicParams(callParams);
    11777 
    11778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11779  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11780  allocation,
    11781  offset,
    11782  size);
    11783  Flush();
    11784 }
    11785 
    11786 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11787  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11788 {
    11789  CallParams callParams;
    11790  GetBasicParams(callParams);
    11791 
    11792  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11793  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11794  allocation,
    11795  offset,
    11796  size);
    11797  Flush();
    11798 }
    11799 
    11800 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    11801  const VkBufferCreateInfo& bufCreateInfo,
    11802  const VmaAllocationCreateInfo& allocCreateInfo,
    11803  VmaAllocation allocation)
    11804 {
    11805  CallParams callParams;
    11806  GetBasicParams(callParams);
    11807 
    11808  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11809  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11810  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11811  bufCreateInfo.flags,
    11812  bufCreateInfo.size,
    11813  bufCreateInfo.usage,
    11814  bufCreateInfo.sharingMode,
    11815  allocCreateInfo.flags,
    11816  allocCreateInfo.usage,
    11817  allocCreateInfo.requiredFlags,
    11818  allocCreateInfo.preferredFlags,
    11819  allocCreateInfo.memoryTypeBits,
    11820  allocCreateInfo.pool,
    11821  allocation,
    11822  userDataStr.GetString());
    11823  Flush();
    11824 }
    11825 
    11826 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    11827  const VkImageCreateInfo& imageCreateInfo,
    11828  const VmaAllocationCreateInfo& allocCreateInfo,
    11829  VmaAllocation allocation)
    11830 {
    11831  CallParams callParams;
    11832  GetBasicParams(callParams);
    11833 
    11834  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11835  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    11836  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11837  imageCreateInfo.flags,
    11838  imageCreateInfo.imageType,
    11839  imageCreateInfo.format,
    11840  imageCreateInfo.extent.width,
    11841  imageCreateInfo.extent.height,
    11842  imageCreateInfo.extent.depth,
    11843  imageCreateInfo.mipLevels,
    11844  imageCreateInfo.arrayLayers,
    11845  imageCreateInfo.samples,
    11846  imageCreateInfo.tiling,
    11847  imageCreateInfo.usage,
    11848  imageCreateInfo.sharingMode,
    11849  imageCreateInfo.initialLayout,
    11850  allocCreateInfo.flags,
    11851  allocCreateInfo.usage,
    11852  allocCreateInfo.requiredFlags,
    11853  allocCreateInfo.preferredFlags,
    11854  allocCreateInfo.memoryTypeBits,
    11855  allocCreateInfo.pool,
    11856  allocation,
    11857  userDataStr.GetString());
    11858  Flush();
    11859 }
    11860 
    11861 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11862  VmaAllocation allocation)
    11863 {
    11864  CallParams callParams;
    11865  GetBasicParams(callParams);
    11866 
    11867  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11868  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11869  allocation);
    11870  Flush();
    11871 }
    11872 
    11873 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11874  VmaAllocation allocation)
    11875 {
    11876  CallParams callParams;
    11877  GetBasicParams(callParams);
    11878 
    11879  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11880  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11881  allocation);
    11882  Flush();
    11883 }
    11884 
    11885 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11886  VmaAllocation allocation)
    11887 {
    11888  CallParams callParams;
    11889  GetBasicParams(callParams);
    11890 
    11891  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11892  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11893  allocation);
    11894  Flush();
    11895 }
    11896 
    11897 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11898  VmaAllocation allocation)
    11899 {
    11900  CallParams callParams;
    11901  GetBasicParams(callParams);
    11902 
    11903  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11904  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11905  allocation);
    11906  Flush();
    11907 }
    11908 
    11909 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11910  VmaPool pool)
    11911 {
    11912  CallParams callParams;
    11913  GetBasicParams(callParams);
    11914 
    11915  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11916  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11917  pool);
    11918  Flush();
    11919 }
    11920 
    11921 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11922 {
    11923  if(pUserData != VMA_NULL)
    11924  {
    11925  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11926  {
    11927  m_Str = (const char*)pUserData;
    11928  }
    11929  else
    11930  {
    11931  sprintf_s(m_PtrStr, "%p", pUserData);
    11932  m_Str = m_PtrStr;
    11933  }
    11934  }
    11935  else
    11936  {
    11937  m_Str = "";
    11938  }
    11939 }
    11940 
    11941 void VmaRecorder::WriteConfiguration(
    11942  const VkPhysicalDeviceProperties& devProps,
    11943  const VkPhysicalDeviceMemoryProperties& memProps,
    11944  bool dedicatedAllocationExtensionEnabled)
    11945 {
    11946  fprintf(m_File, "Config,Begin\n");
    11947 
    11948  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    11949  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    11950  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    11951  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    11952  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    11953  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    11954 
    11955  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    11956  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    11957  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    11958 
    11959  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    11960  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    11961  {
    11962  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    11963  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    11964  }
    11965  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    11966  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    11967  {
    11968  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    11969  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    11970  }
    11971 
    11972  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    11973 
    11974  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    11975  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    11976  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    11977  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    11978  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    11979  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    11980  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    11981  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    11982  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    11983 
    11984  fprintf(m_File, "Config,End\n");
    11985 }
    11986 
    11987 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11988 {
    11989  outParams.threadId = GetCurrentThreadId();
    11990 
    11991  LARGE_INTEGER counter;
    11992  QueryPerformanceCounter(&counter);
    11993  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11994 }
    11995 
    11996 void VmaRecorder::Flush()
    11997 {
    11998  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11999  {
    12000  fflush(m_File);
    12001  }
    12002 }
    12003 
    12004 #endif // #if VMA_RECORDING_ENABLED
    12005 
    12007 // VmaAllocator_T
    12008 
    12009 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12010  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12011  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12012  m_hDevice(pCreateInfo->device),
    12013  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12014  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12015  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12016  m_PreferredLargeHeapBlockSize(0),
    12017  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12018  m_CurrentFrameIndex(0),
    12019  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12020  m_NextPoolId(0)
    12022  ,m_pRecorder(VMA_NULL)
    12023 #endif
    12024 {
    12025  if(VMA_DEBUG_DETECT_CORRUPTION)
    12026  {
    12027  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12028  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12029  }
    12030 
    12031  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12032 
    12033 #if !(VMA_DEDICATED_ALLOCATION)
    12035  {
    12036  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12037  }
    12038 #endif
    12039 
    12040  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12041  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12042  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12043 
    12044  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12045  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12046 
    12047  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12048  {
    12049  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12050  }
    12051 
    12052  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12053  {
    12054  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12055  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12056  }
    12057 
    12058  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12059 
    12060  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12061  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12062 
    12063  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12064  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12065  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12066  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12067 
    12068  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12069  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12070 
    12071  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12072  {
    12073  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12074  {
    12075  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12076  if(limit != VK_WHOLE_SIZE)
    12077  {
    12078  m_HeapSizeLimit[heapIndex] = limit;
    12079  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12080  {
    12081  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12082  }
    12083  }
    12084  }
    12085  }
    12086 
    12087  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12088  {
    12089  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12090 
    12091  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12092  this,
    12093  memTypeIndex,
    12094  preferredBlockSize,
    12095  0,
    12096  SIZE_MAX,
    12097  GetBufferImageGranularity(),
    12098  pCreateInfo->frameInUseCount,
    12099  false, // isCustomPool
    12100  false, // explicitBlockSize
    12101  false); // linearAlgorithm
    12102  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12103  // becase minBlockCount is 0.
    12104  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12105 
    12106  }
    12107 }
    12108 
// Second-stage initialization: sets up call recording if requested via
// pCreateInfo->pRecordSettings. Returns VK_SUCCESS, the recorder's Init()
// error, or VK_ERROR_FEATURE_NOT_PRESENT when recording was requested but
// this build has VMA_RECORDING_ENABLED off.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    // Recording is enabled only when settings are provided with a non-empty file path.
    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        // Write the environment header first, then the creation event itself.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
    12136 
    12137 VmaAllocator_T::~VmaAllocator_T()
    12138 {
    12139 #if VMA_RECORDING_ENABLED
    12140  if(m_pRecorder != VMA_NULL)
    12141  {
    12142  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    12143  vma_delete(this, m_pRecorder);
    12144  }
    12145 #endif
    12146 
    12147  VMA_ASSERT(m_Pools.empty());
    12148 
    12149  for(size_t i = GetMemoryTypeCount(); i--; )
    12150  {
    12151  vma_delete(this, m_pDedicatedAllocations[i]);
    12152  vma_delete(this, m_pBlockVectors[i]);
    12153  }
    12154 }
    12155 
// Populates m_VulkanFunctions. Resolution order: (1) statically linked entry
// points when VMA_STATIC_VULKAN_FUNCTIONS == 1 (plus vkGetDeviceProcAddr for
// the KHR dedicated-allocation pair), then (2) any non-null pointers from the
// user-supplied pVulkanFunctions override the static ones. Finally asserts
// that every required function pointer was resolved.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extension entry points are not statically exported; fetch via the device loader.
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-supplied pointer over the default, but only if it is non-null.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12241 
    12242 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12243 {
    12244  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12245  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12246  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12247  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12248 }
    12249 
    12250 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12251  VkDeviceSize size,
    12252  VkDeviceSize alignment,
    12253  bool dedicatedAllocation,
    12254  VkBuffer dedicatedBuffer,
    12255  VkImage dedicatedImage,
    12256  const VmaAllocationCreateInfo& createInfo,
    12257  uint32_t memTypeIndex,
    12258  VmaSuballocationType suballocType,
    12259  VmaAllocation* pAllocation)
    12260 {
    12261  VMA_ASSERT(pAllocation != VMA_NULL);
    12262  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12263 
    12264  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12265 
    12266  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12267  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12268  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12269  {
    12270  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12271  }
    12272 
    12273  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12274  VMA_ASSERT(blockVector);
    12275 
    12276  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12277  bool preferDedicatedMemory =
    12278  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12279  dedicatedAllocation ||
    12280  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12281  size > preferredBlockSize / 2;
    12282 
    12283  if(preferDedicatedMemory &&
    12284  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12285  finalCreateInfo.pool == VK_NULL_HANDLE)
    12286  {
    12288  }
    12289 
    12290  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12291  {
    12292  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12293  {
    12294  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12295  }
    12296  else
    12297  {
    12298  return AllocateDedicatedMemory(
    12299  size,
    12300  suballocType,
    12301  memTypeIndex,
    12302  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12303  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12304  finalCreateInfo.pUserData,
    12305  dedicatedBuffer,
    12306  dedicatedImage,
    12307  pAllocation);
    12308  }
    12309  }
    12310  else
    12311  {
    12312  VkResult res = blockVector->Allocate(
    12313  VK_NULL_HANDLE, // hCurrentPool
    12314  m_CurrentFrameIndex.load(),
    12315  size,
    12316  alignment,
    12317  finalCreateInfo,
    12318  suballocType,
    12319  pAllocation);
    12320  if(res == VK_SUCCESS)
    12321  {
    12322  return res;
    12323  }
    12324 
    12325  // 5. Try dedicated memory.
    12326  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12327  {
    12328  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12329  }
    12330  else
    12331  {
    12332  res = AllocateDedicatedMemory(
    12333  size,
    12334  suballocType,
    12335  memTypeIndex,
    12336  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12337  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12338  finalCreateInfo.pUserData,
    12339  dedicatedBuffer,
    12340  dedicatedImage,
    12341  pAllocation);
    12342  if(res == VK_SUCCESS)
    12343  {
    12344  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12345  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12346  return VK_SUCCESS;
    12347  }
    12348  else
    12349  {
    12350  // Everything failed: Return error code.
    12351  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12352  return res;
    12353  }
    12354  }
    12355  }
    12356 }
    12357 
// Allocates one dedicated VkDeviceMemory block for a single allocation:
// optionally chains VkMemoryDedicatedAllocateInfoKHR (when the KHR extension
// is in use and a buffer/image handle is given), optionally maps the memory,
// then registers the new allocation in m_pDedicatedAllocations[memTypeIndex].
// On map failure the freshly allocated memory is released before returning.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain the dedicated-allocation info so the driver can tie this memory
    // to the specific buffer or image (at most one of the two is expected).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole block if requested.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            // Roll back the device memory allocation before reporting the error.
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Fill new memory with a recognizable pattern to expose use of uninitialized data.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12440 
// Queries memory requirements for hBuffer and reports whether the driver
// requires/prefers a dedicated allocation for it.
// Outputs: memReq, requiresDedicatedAllocation, prefersDedicatedAllocation.
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extended query (VK_KHR_get_memory_requirements2): chain
        // VkMemoryDedicatedRequirementsKHR to also get dedicated-allocation hints.
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        // Core Vulkan 1.0 query: no dedicated-allocation hints available.
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
    12472 
// Image counterpart of GetBufferMemoryRequirements(): queries memory
// requirements for hImage plus dedicated-allocation hints when available.
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        // Extended query: chain VkMemoryDedicatedRequirementsKHR to learn whether
        // the driver requires/prefers a dedicated allocation for this image.
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        // Core Vulkan 1.0 query: no dedicated-allocation hints available.
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
    12504 
    12505 VkResult VmaAllocator_T::AllocateMemory(
    12506  const VkMemoryRequirements& vkMemReq,
    12507  bool requiresDedicatedAllocation,
    12508  bool prefersDedicatedAllocation,
    12509  VkBuffer dedicatedBuffer,
    12510  VkImage dedicatedImage,
    12511  const VmaAllocationCreateInfo& createInfo,
    12512  VmaSuballocationType suballocType,
    12513  VmaAllocation* pAllocation)
    12514 {
    12515  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12516 
    12517  if(vkMemReq.size == 0)
    12518  {
    12519  return VK_ERROR_VALIDATION_FAILED_EXT;
    12520  }
    12521  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12522  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12523  {
    12524  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12525  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12526  }
    12527  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12529  {
    12530  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12531  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12532  }
    12533  if(requiresDedicatedAllocation)
    12534  {
    12535  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12536  {
    12537  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12538  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12539  }
    12540  if(createInfo.pool != VK_NULL_HANDLE)
    12541  {
    12542  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12543  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12544  }
    12545  }
    12546  if((createInfo.pool != VK_NULL_HANDLE) &&
    12547  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12548  {
    12549  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12550  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12551  }
    12552 
    12553  if(createInfo.pool != VK_NULL_HANDLE)
    12554  {
    12555  const VkDeviceSize alignmentForPool = VMA_MAX(
    12556  vkMemReq.alignment,
    12557  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12558  return createInfo.pool->m_BlockVector.Allocate(
    12559  createInfo.pool,
    12560  m_CurrentFrameIndex.load(),
    12561  vkMemReq.size,
    12562  alignmentForPool,
    12563  createInfo,
    12564  suballocType,
    12565  pAllocation);
    12566  }
    12567  else
    12568  {
    12569  // Bit mask of memory Vulkan types acceptable for this allocation.
    12570  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12571  uint32_t memTypeIndex = UINT32_MAX;
    12572  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12573  if(res == VK_SUCCESS)
    12574  {
    12575  VkDeviceSize alignmentForMemType = VMA_MAX(
    12576  vkMemReq.alignment,
    12577  GetMemoryTypeMinAlignment(memTypeIndex));
    12578 
    12579  res = AllocateMemoryOfType(
    12580  vkMemReq.size,
    12581  alignmentForMemType,
    12582  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12583  dedicatedBuffer,
    12584  dedicatedImage,
    12585  createInfo,
    12586  memTypeIndex,
    12587  suballocType,
    12588  pAllocation);
    12589  // Succeeded on first try.
    12590  if(res == VK_SUCCESS)
    12591  {
    12592  return res;
    12593  }
    12594  // Allocation from this memory type failed. Try other compatible memory types.
    12595  else
    12596  {
    12597  for(;;)
    12598  {
    12599  // Remove old memTypeIndex from list of possibilities.
    12600  memoryTypeBits &= ~(1u << memTypeIndex);
    12601  // Find alternative memTypeIndex.
    12602  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12603  if(res == VK_SUCCESS)
    12604  {
    12605  alignmentForMemType = VMA_MAX(
    12606  vkMemReq.alignment,
    12607  GetMemoryTypeMinAlignment(memTypeIndex));
    12608 
    12609  res = AllocateMemoryOfType(
    12610  vkMemReq.size,
    12611  alignmentForMemType,
    12612  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12613  dedicatedBuffer,
    12614  dedicatedImage,
    12615  createInfo,
    12616  memTypeIndex,
    12617  suballocType,
    12618  pAllocation);
    12619  // Allocation from this alternative memory type succeeded.
    12620  if(res == VK_SUCCESS)
    12621  {
    12622  return res;
    12623  }
    12624  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12625  }
    12626  // No other matching memory type index could be found.
    12627  else
    12628  {
    12629  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12630  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12631  }
    12632  }
    12633  }
    12634  }
    12635  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12636  else
    12637  return res;
    12638  }
    12639 }
    12640 
// Frees an allocation's backing memory (unless it is already lost) and then
// destroys the allocation object itself.
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    // TouchAllocation() returns false when the allocation is lost — in that
    // case there is no backing memory to return, only the handle to delete.
    if(TouchAllocation(allocation))
    {
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Overwrite freed memory with a debug pattern to help catch use-after-free.
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Return the suballocation to the block vector that owns it:
                // a custom pool's vector, or the default per-memory-type vector.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Destroy the allocation object itself, whether or not it was lost.
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
    12681 
    12682 VkResult VmaAllocator_T::ResizeAllocation(
    12683  const VmaAllocation alloc,
    12684  VkDeviceSize newSize)
    12685 {
    12686  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12687  {
    12688  return VK_ERROR_VALIDATION_FAILED_EXT;
    12689  }
    12690  if(newSize == alloc->GetSize())
    12691  {
    12692  return VK_SUCCESS;
    12693  }
    12694 
    12695  switch(alloc->GetType())
    12696  {
    12697  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12698  return VK_ERROR_FEATURE_NOT_PRESENT;
    12699  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12700  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12701  {
    12702  alloc->ChangeSize(newSize);
    12703  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12704  return VK_SUCCESS;
    12705  }
    12706  else
    12707  {
    12708  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12709  }
    12710  default:
    12711  VMA_ASSERT(0);
    12712  return VK_ERROR_VALIDATION_FAILED_EXT;
    12713  }
    12714 }
    12715 
    12716 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12717 {
    12718  // Initialize.
    12719  InitStatInfo(pStats->total);
    12720  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12721  InitStatInfo(pStats->memoryType[i]);
    12722  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12723  InitStatInfo(pStats->memoryHeap[i]);
    12724 
    12725  // Process default pools.
    12726  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12727  {
    12728  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12729  VMA_ASSERT(pBlockVector);
    12730  pBlockVector->AddStats(pStats);
    12731  }
    12732 
    12733  // Process custom pools.
    12734  {
    12735  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12736  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12737  {
    12738  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12739  }
    12740  }
    12741 
    12742  // Process dedicated allocations.
    12743  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12744  {
    12745  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12746  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12747  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12748  VMA_ASSERT(pDedicatedAllocVector);
    12749  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12750  {
    12751  VmaStatInfo allocationStatInfo;
    12752  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12753  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12754  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12755  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12756  }
    12757  }
    12758 
    12759  // Postprocess.
    12760  VmaPostprocessCalcStatInfo(pStats->total);
    12761  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12762  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12763  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12764  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12765 }
    12766 
    12767 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12768 
// Defragments the given allocations in three phases:
//  1. dispatch each eligible allocation to a per-block-vector defragmentator,
//  2. run defragmentation on default pools and custom pools,
//  3. destroy all defragmentators.
// Only block allocations in HOST_VISIBLE|HOST_COHERENT memory that are not
// lost are eligible; pools using linear/buddy algorithms are skipped.
// pAllocationsChanged (optional) receives a per-allocation "was moved" flag;
// pDefragmentationStats (optional) receives aggregate statistics.
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    // Held for the entire operation so the set of custom pools cannot change.
    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // No explicit limits provided means "move as much as possible".
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory. Stops early if any block vector reports failure.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.
    // Runs even if defragmentation stopped early, in reverse order of creation.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12889 
// Fills *pAllocationInfo with the allocation's current properties.
// For allocations that can become lost, this also counts as a "use": the
// last-use frame index is advanced to the current frame via a lock-free
// compare-exchange loop, unless the allocation is already lost.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report size/userData only; no memory backing.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report current properties.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump last-use to the current frame; on CAS failure,
                // localLastUseFrameIndex is reloaded and the loop re-evaluates.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats-only bookkeeping: keep last-use frame index current so JSON
        // dumps reflect recent usage. Non-lost allocations must never be LOST.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12961 
// Marks the allocation as used in the current frame and reports whether it is
// still alive. Returns false only for allocations that can become lost and
// actually are lost; otherwise advances the last-use frame index (CAS loop)
// and returns true.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to bump last-use to the current frame; on CAS failure the
                // loop re-evaluates with the freshly loaded value.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats-only bookkeeping; a non-lost-capable allocation can never be LOST.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    13013 
    13014 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13015 {
    13016  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13017 
    13018  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13019 
    13020  if(newCreateInfo.maxBlockCount == 0)
    13021  {
    13022  newCreateInfo.maxBlockCount = SIZE_MAX;
    13023  }
    13024  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13025  {
    13026  return VK_ERROR_INITIALIZATION_FAILED;
    13027  }
    13028 
    13029  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13030 
    13031  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13032 
    13033  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13034  if(res != VK_SUCCESS)
    13035  {
    13036  vma_delete(this, *pPool);
    13037  *pPool = VMA_NULL;
    13038  return res;
    13039  }
    13040 
    13041  // Add to m_Pools.
    13042  {
    13043  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13044  (*pPool)->SetId(m_NextPoolId++);
    13045  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13046  }
    13047 
    13048  return VK_SUCCESS;
    13049 }
    13050 
    13051 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13052 {
    13053  // Remove from m_Pools.
    13054  {
    13055  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13056  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13057  VMA_ASSERT(success && "Pool not found in Allocator.");
    13058  }
    13059 
    13060  vma_delete(this, pool);
    13061 }
    13062 
// Fetches statistics for a single custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13067 
// Atomically publishes the application's current frame index, which the
// lost-allocation machinery compares last-use indices against.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    13072 
// Forces eligible allocations in the given pool to become lost, judged against
// the current frame index. pLostAllocationCount (optional per the delegate's
// use here) receives the number of allocations that became lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    13081 
// Runs corruption detection over a single custom pool's blocks.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    13086 
    13087 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13088 {
    13089  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13090 
    13091  // Process default pools.
    13092  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13093  {
    13094  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13095  {
    13096  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13097  VMA_ASSERT(pBlockVector);
    13098  VkResult localRes = pBlockVector->CheckCorruption();
    13099  switch(localRes)
    13100  {
    13101  case VK_ERROR_FEATURE_NOT_PRESENT:
    13102  break;
    13103  case VK_SUCCESS:
    13104  finalRes = VK_SUCCESS;
    13105  break;
    13106  default:
    13107  return localRes;
    13108  }
    13109  }
    13110  }
    13111 
    13112  // Process custom pools.
    13113  {
    13114  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13115  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13116  {
    13117  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13118  {
    13119  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13120  switch(localRes)
    13121  {
    13122  case VK_ERROR_FEATURE_NOT_PRESENT:
    13123  break;
    13124  case VK_SUCCESS:
    13125  finalRes = VK_SUCCESS;
    13126  break;
    13127  default:
    13128  return localRes;
    13129  }
    13130  }
    13131  }
    13132  }
    13133 
    13134  return finalRes;
    13135 }
    13136 
// Creates an allocation object that is permanently in the "lost" state
// (frame index VMA_FRAME_INDEX_LOST, no backing memory).
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    13142 
// Calls vkAllocateMemory, enforcing the user-configured per-heap size limit
// (m_HeapSizeLimit) when one is set, and notifies the user's device-memory
// allocate callback on success.
VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
{
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);

    VkResult res;
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        // Limited heap: check-and-debit the remaining budget under the mutex so
        // concurrent allocations cannot overshoot the limit.
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
        {
            res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
            if(res == VK_SUCCESS)
            {
                m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
            }
        }
        else
        {
            // Budget exhausted: fail without calling the driver.
            res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }
    else
    {
        // Unlimited heap (VK_WHOLE_SIZE sentinel): allocate directly.
        res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    }

    // Inform the user's callback about the new VkDeviceMemory object.
    if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    }

    return res;
}
    13176 
// Calls vkFreeMemory, first notifying the user's device-memory free callback,
// and credits the freed size back to the heap's budget if a limit is active.
void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
{
    // Callback fires before the memory is actually freed.
    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    {
        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    }

    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());

    // Return the bytes to the per-heap budget (mirror of AllocateVulkanMemory's debit).
    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    {
        VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
        m_HeapSizeLimit[heapIndex] += size;
    }
}
    13193 
// Maps the allocation's memory and returns a pointer (offset-adjusted for
// block suballocations) in *ppData. Allocations that can become lost cannot
// be mapped. Each successful Map must be balanced by an Unmap.
VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
{
    if(hAllocation->CanBecomeLost())
    {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // Map the whole block (ref-counted, count 1 here), then offset the
            // returned pointer to this suballocation's position inside it.
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            char *pBytes = VMA_NULL;
            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
            if(res == VK_SUCCESS)
            {
                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
                hAllocation->BlockAllocMap();
            }
            return res;
        }
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        return hAllocation->DedicatedAllocMap(this, ppData);
    default:
        VMA_ASSERT(0);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }
}
    13222 
// Reverses one prior successful Map() on the allocation, decrementing the
// allocation's and (for block suballocations) the block's map reference counts.
void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
{
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // Drop this allocation's map ref first, then the block's.
            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
            hAllocation->BlockAllocUnmap();
            pBlock->Unmap(this, 1);
        }
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        hAllocation->DedicatedAllocUnmap(this);
        break;
    default:
        VMA_ASSERT(0);
    }
}
    13241 
// Binds hBuffer to the allocation's memory: directly at offset 0 for a
// dedicated allocation, or through the owning block (which applies the
// suballocation offset) for a block allocation.
VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
{
    VkResult res = VK_SUCCESS;
    switch(hAllocation->GetType())
    {
    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
        res = GetVulkanFunctions().vkBindBufferMemory(
            m_hDevice,
            hBuffer,
            hAllocation->GetMemory(),
            0); //memoryOffset
        break;
    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
            VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
            res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
            break;
        }
    default:
        VMA_ASSERT(0);
    }
    return res;
}
    13266 
    13267 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13268 {
    13269  VkResult res = VK_SUCCESS;
    13270  switch(hAllocation->GetType())
    13271  {
    13272  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13273  res = GetVulkanFunctions().vkBindImageMemory(
    13274  m_hDevice,
    13275  hImage,
    13276  hAllocation->GetMemory(),
    13277  0); //memoryOffset
    13278  break;
    13279  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13280  {
    13281  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13282  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13283  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13284  break;
    13285  }
    13286  default:
    13287  VMA_ASSERT(0);
    13288  }
    13289  return res;
    13290 }
    13291 
/*
Flushes or invalidates the CPU cache for the [offset, offset+size) range of
hAllocation's memory, but only when its memory type is non-HOST_COHERENT
(coherent memory needs no explicit flush/invalidate — the call is a no-op then).

op selects vkFlushMappedMemoryRanges (VMA_CACHE_FLUSH) or
vkInvalidateMappedMemoryRanges (VMA_CACHE_INVALIDATE).
size == VK_WHOLE_SIZE means "to the end of the allocation".
Offset and size are expanded outward to multiples of nonCoherentAtomSize,
as the Vulkan spec requires for VkMappedMemoryRange.
*/
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated: range is relative to the start of the VkDeviceMemory.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Grow size by the amount the offset was rounded down, align up,
                // then clamp so the range never exceeds the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Translate the range from allocation-relative to block-relative
            // coordinates, then clamp to the block's end.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13367 
/*
Frees the VkDeviceMemory of a dedicated allocation and unregisters it from
the per-memory-type dedicated-allocations vector. The mutex is held only for
the vector removal (inner scope); the actual vkFreeMemory happens unlocked.
*/
void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
{
    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);

    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    {
        // Lock scope: remove the allocation from the sorted registry.
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
        VMA_ASSERT(success);
    }

    VkDeviceMemory hMemory = allocation->GetMemory();

    /*
    There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    before vkFreeMemory.

    if(allocation->GetMappedData() != VMA_NULL)
    {
        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    }
    */

    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);

    VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
}
    13397 
    13398 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13399 {
    13400  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13401  !hAllocation->CanBecomeLost() &&
    13402  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13403  {
    13404  void* pData = VMA_NULL;
    13405  VkResult res = Map(hAllocation, &pData);
    13406  if(res == VK_SUCCESS)
    13407  {
    13408  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13409  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13410  Unmap(hAllocation);
    13411  }
    13412  else
    13413  {
    13414  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13415  }
    13416  }
    13417 }
    13418 
    13419 #if VMA_STATS_STRING_ENABLED
    13420 
/*
Writes the detailed memory map into the given JSON writer, in three sections:
"DedicatedAllocations" (per memory type), "DefaultPools" (per memory type's
default block vector), and "Pools" (custom pools, keyed by pool id).
Each section header is emitted lazily, only when it has content.
Caller is expected to have an open JSON object (used by vmaBuildStatsString).
*/
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        // Lock each memory type's dedicated-allocations list while reading it.
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                // Open the section on first non-empty type.
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    {
        // Default pools: one block vector per memory type.
        // NOTE(review): m_pBlockVectors is read without an explicit lock here —
        // presumably synchronization happens inside the block vector; confirm.
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Keys are pool ids, unique per allocator.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13506 
    13507 #endif // #if VMA_STATS_STRING_ENABLED
    13508 
    13510 // Public interface
    13511 
/*
Creates the allocator object from pCreateInfo.
Note: *pAllocator is written before Init() runs, so it is set even when
initialization fails; the returned VkResult reports the Init outcome.
*/
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13521 
    13522 void vmaDestroyAllocator(
    13523  VmaAllocator allocator)
    13524 {
    13525  if(allocator != VK_NULL_HANDLE)
    13526  {
    13527  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13528  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13529  vma_delete(&allocationCallbacks, allocator);
    13530  }
    13531 }
    13532 
// NOTE(review): the function signature line is missing from this extract;
// judging by the body this is presumably vmaGetPhysicalDeviceProperties — confirm.
    VmaAllocator allocator,
    const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    // Returns a pointer to data owned by the allocator (cached at creation).
    *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
}
    13540 
// NOTE(review): the function signature line is missing from this extract;
// judging by the body this is presumably vmaGetMemoryProperties — confirm.
    VmaAllocator allocator,
    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
{
    VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    // Returns a pointer to data owned by the allocator (cached at creation).
    *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
}
    13548 
// NOTE(review): the function signature line is missing from this extract;
// judging by the body this is presumably vmaGetMemoryTypeProperties — confirm.
    VmaAllocator allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* pFlags)
{
    VMA_ASSERT(allocator && pFlags);
    // memoryTypeIndex must be a valid index into the cached memory properties.
    VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
}
    13558 
// NOTE(review): the function signature line is missing from this extract;
// judging by the body this is presumably vmaSetCurrentFrameIndex — confirm.
    VmaAllocator allocator,
    uint32_t frameIndex)
{
    VMA_ASSERT(allocator);
    // VMA_FRAME_INDEX_LOST is a reserved sentinel and must not be used here.
    VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->SetCurrentFrameIndex(frameIndex);
}
    13570 
/*
Fills *pStats with statistics aggregated over all memory owned by the
allocator: totals plus per-memory-heap and per-memory-type breakdowns.
*/
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13579 
    13580 #if VMA_STATS_STRING_ENABLED
    13581 
/*
Builds a JSON string describing the allocator's current state: total stats,
then per-heap size/flags/stats and nested per-type flags/stats; when
detailedMap is VK_TRUE, the full detailed map (pools, blocks, allocations)
is appended. The returned string is allocated with the allocator's callbacks
and must be released with vmaFreeStatsString.
*/
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ensures the JSON writer flushes before the string is copied out.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Omit empty stats to keep the output compact.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest each memory type under the heap it belongs to.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built text into a caller-owned, NUL-terminated buffer.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13689 
    13690 void vmaFreeStatsString(
    13691  VmaAllocator allocator,
    13692  char* pStatsString)
    13693 {
    13694  if(pStatsString != VMA_NULL)
    13695  {
    13696  VMA_ASSERT(allocator);
    13697  size_t len = strlen(pStatsString);
    13698  vma_delete_array(allocator, pStatsString, len + 1);
    13699  }
    13700 }
    13701 
    13702 #endif // #if VMA_STATS_STRING_ENABLED
    13703 
    13704 /*
    13705 This function is not protected by any mutex because it just reads immutable data.
    13706 */
    13707 VkResult vmaFindMemoryTypeIndex(
    13708  VmaAllocator allocator,
    13709  uint32_t memoryTypeBits,
    13710  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13711  uint32_t* pMemoryTypeIndex)
    13712 {
    13713  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13714  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13715  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13716 
    13717  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13718  {
    13719  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13720  }
    13721 
    13722  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13723  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13724 
    13725  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13726  if(mapped)
    13727  {
    13728  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13729  }
    13730 
    13731  // Convert usage to requiredFlags and preferredFlags.
    13732  switch(pAllocationCreateInfo->usage)
    13733  {
    13735  break;
    13737  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13738  {
    13739  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13740  }
    13741  break;
    13743  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13744  break;
    13746  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13747  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13748  {
    13749  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13750  }
    13751  break;
    13753  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13754  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13755  break;
    13756  default:
    13757  break;
    13758  }
    13759 
    13760  *pMemoryTypeIndex = UINT32_MAX;
    13761  uint32_t minCost = UINT32_MAX;
    13762  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13763  memTypeIndex < allocator->GetMemoryTypeCount();
    13764  ++memTypeIndex, memTypeBit <<= 1)
    13765  {
    13766  // This memory type is acceptable according to memoryTypeBits bitmask.
    13767  if((memTypeBit & memoryTypeBits) != 0)
    13768  {
    13769  const VkMemoryPropertyFlags currFlags =
    13770  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13771  // This memory type contains requiredFlags.
    13772  if((requiredFlags & ~currFlags) == 0)
    13773  {
    13774  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13775  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13776  // Remember memory type with lowest cost.
    13777  if(currCost < minCost)
    13778  {
    13779  *pMemoryTypeIndex = memTypeIndex;
    13780  if(currCost == 0)
    13781  {
    13782  return VK_SUCCESS;
    13783  }
    13784  minCost = currCost;
    13785  }
    13786  }
    13787  }
    13788  }
    13789  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13790 }
    13791 
// NOTE(review): the function signature line is missing from this extract;
// judging by the body this is presumably vmaFindMemoryTypeIndexForBufferInfo — confirm.
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    // Creates a temporary buffer only to query its memory requirements
    // (memoryTypeBits), then destroys it. No memory is ever bound.
    const VkDevice hDev = allocator->m_hDevice;
    VkBuffer hBuffer = VK_NULL_HANDLE;
    VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
        hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    if(res == VK_SUCCESS)
    {
        VkMemoryRequirements memReq = {};
        allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
            hDev, hBuffer, &memReq);

        res = vmaFindMemoryTypeIndex(
            allocator,
            memReq.memoryTypeBits,
            pAllocationCreateInfo,
            pMemoryTypeIndex);

        allocator->GetVulkanFunctions().vkDestroyBuffer(
            hDev, hBuffer, allocator->GetAllocationCallbacks());
    }
    return res;
}
    13824 
// NOTE(review): the function signature line is missing from this extract;
// judging by the body this is presumably vmaFindMemoryTypeIndexForImageInfo — confirm.
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    // Creates a temporary image only to query its memory requirements
    // (memoryTypeBits), then destroys it. No memory is ever bound.
    const VkDevice hDev = allocator->m_hDevice;
    VkImage hImage = VK_NULL_HANDLE;
    VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
        hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    if(res == VK_SUCCESS)
    {
        VkMemoryRequirements memReq = {};
        allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
            hDev, hImage, &memReq);

        res = vmaFindMemoryTypeIndex(
            allocator,
            memReq.memoryTypeBits,
            pAllocationCreateInfo,
            pMemoryTypeIndex);

        allocator->GetVulkanFunctions().vkDestroyImage(
            hDev, hImage, allocator->GetAllocationCallbacks());
    }
    return res;
}
    13857 
/*
Creates a custom memory pool described by pCreateInfo and stores its handle
in *pPool. When recording is enabled, the call is logged for replay.
*/
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    // Record after creation so the new pool handle can be logged.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
    13880 
/*
Destroys a custom memory pool created with vmaCreatePool.
Passing VK_NULL_HANDLE is a no-op. When recording is enabled, the destruction
is logged before it happens (the handle must still be valid when recorded).
*/
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
    13905 
/*
Fills *pPoolStats with current statistics of the given custom pool.
*/
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
    13917 
// NOTE(review): the function signature line is missing from this extract;
// judging by the body this is presumably vmaMakePoolAllocationsLost — confirm.
    VmaAllocator allocator,
    VmaPool pool,
    size_t* pLostAllocationCount)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    // pLostAllocationCount may receive the number of allocations marked lost.
    allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
}
    13936 
/*
Checks the magic-value margins of all allocations in the given custom pool
for memory corruption. Returns the result of the allocator-level check.
*/
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}
    13947 
/*
Allocates memory for the given raw VkMemoryRequirements (no buffer/image is
involved, so dedicated-allocation hints are hard-coded to false and the
suballocation type is UNKNOWN). On success optionally fills *pAllocationInfo.
*/
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // Only query info when the allocation actually succeeded.
    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13989 
// NOTE(review): the function signature line is missing from this extract;
// the VMA_DEBUG_LOG below shows it is vmaAllocateMemoryForBuffer.
    VmaAllocator allocator,
    VkBuffer buffer,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Query the buffer's requirements and dedicated-allocation preferences.
    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation = false;
    allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        buffer, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_BUFFER,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // Only query info when the allocation actually succeeded.
    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    14040 
/*
Allocates memory suitable for the given VkImage (the image is NOT bound —
call vmaBindImageMemory or vkBindImageMemory afterwards). The image's
requirements and dedicated-allocation preferences are queried first.
On success optionally fills *pAllocationInfo.
*/
VkResult vmaAllocateMemoryForImage(
    VmaAllocator allocator,
    VkImage image,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemoryForImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkMemoryRequirements vkMemReq = {};
    bool requiresDedicatedAllocation = false;
    bool prefersDedicatedAllocation  = false;
    allocator->GetImageMemoryRequirements(image, vkMemReq,
        requiresDedicatedAllocation, prefersDedicatedAllocation);

    VkResult result = allocator->AllocateMemory(
        vkMemReq,
        requiresDedicatedAllocation,
        prefersDedicatedAllocation,
        VK_NULL_HANDLE, // dedicatedBuffer
        image, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemoryForImage(
            allocator->GetCurrentFrameIndex(),
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    // Only query info when the allocation actually succeeded.
    if(pAllocationInfo && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    14090 
/*
Frees memory previously allocated through any vmaAllocateMemory* /
vmaCreateBuffer / vmaCreateImage call. Passing VK_NULL_HANDLE is a no-op.
*/
void vmaFreeMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator);

    if(allocation == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaFreeMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Record before freeing, while the handle is still valid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFreeMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->FreeMemory(allocation);
}
    14117 
/*
Tries to resize an existing allocation in place to newSize.
Returns the allocator's result — success is only possible when the underlying
metadata can accommodate the new size without moving the allocation.
*/
VkResult vmaResizeAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaResizeAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordResizeAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation,
            newSize);
    }
#endif

    return allocator->ResizeAllocation(allocation, newSize);
}
    14141 
// NOTE(review): the function signature line is missing from this extract;
// the recorder call below shows it is vmaGetAllocationInfo.
    VmaAllocator allocator,
    VmaAllocation allocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && allocation && pAllocationInfo);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordGetAllocationInfo(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->GetAllocationInfo(allocation, pAllocationInfo);
}
    14162 
/*
Touches the allocation: returns VK_TRUE if it is not lost (and, per the
allocator's TouchAllocation semantics, updates its last-use frame index so
it is not considered for becoming lost this frame).
*/
VkBool32 vmaTouchAllocation(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordTouchAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return allocator->TouchAllocation(allocation);
}
    14182 
// NOTE(review): the function signature line is missing from this extract;
// the recorder call below shows it is vmaSetAllocationUserData.
    VmaAllocator allocator,
    VmaAllocation allocation,
    void* pUserData)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocation->SetUserData(allocator, pUserData);

#if VMA_RECORDING_ENABLED
    // Record after the value is set so the recording reflects the new state.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordSetAllocationUserData(
            allocator->GetCurrentFrameIndex(),
            allocation,
            pUserData);
    }
#endif
}
    14204 
// NOTE(review): the function signature line is missing from this extract;
// the recorder call below shows it is vmaCreateLostAllocation.
    VmaAllocator allocator,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(allocator && pAllocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK;

    // Creates an allocation that is already in the "lost" state (no memory).
    allocator->CreateLostAllocation(pAllocation);

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreateLostAllocation(
            allocator->GetCurrentFrameIndex(),
            *pAllocation);
    }
#endif
}
    14224 
// Maps the memory represented by the allocation and returns a CPU pointer
// to it in *ppData. The result propagates whatever VkResult
// VmaAllocator_T::Map produces.
VkResult vmaMapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    void** ppData)
{
    VMA_ASSERT(allocator && allocation && ppData);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Perform the map first; its result is returned to the caller below.
    VkResult res = allocator->Map(allocation, ppData);

#if VMA_RECORDING_ENABLED
    // NOTE: the call is recorded regardless of whether Map() succeeded —
    // res is intentionally not checked before recording.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordMapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    return res;
}
    14247 
// Unmaps memory previously mapped with vmaMapMemory.
void vmaUnmapMemory(
    VmaAllocator allocator,
    VmaAllocation allocation)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    // Unlike vmaMapMemory, the recording happens before the operation here.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordUnmapMemory(
            allocator->GetCurrentFrameIndex(),
            allocation);
    }
#endif

    allocator->Unmap(allocation);
}
    14267 
// Flushes host writes in [offset, offset+size) of the allocation's memory
// so they become visible to the device (forwarded as VMA_CACHE_FLUSH).
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaFlushAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);

#if VMA_RECORDING_ENABLED
    // Record after performing the flush, if recording is active.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordFlushAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    14287 
// Invalidates host caches for [offset, offset+size) of the allocation's
// memory so device writes become visible to the host (VMA_CACHE_INVALIDATE).
// Mirrors vmaFlushAllocation with the opposite cache operation.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    VMA_ASSERT(allocator && allocation);

    VMA_DEBUG_LOG("vmaInvalidateAllocation");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);

#if VMA_RECORDING_ENABLED
    // Record after performing the invalidate, if recording is active.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordInvalidateAllocation(
            allocator->GetCurrentFrameIndex(),
            allocation, offset, size);
    }
#endif
}
    14307 
// Runs corruption detection over memory blocks of the memory types selected
// by memoryTypeBits, returning the VkResult from the allocator's check.
// NOTE: this call is not recorded even when VMA_RECORDING_ENABLED is set.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
{
    VMA_ASSERT(allocator);

    VMA_DEBUG_LOG("vmaCheckCorruption");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->CheckCorruption(memoryTypeBits);
}
    14318 
// Defragments the given allocations, forwarding all parameters to the
// allocator. pAllocationsChanged, pDefragmentationInfo and
// pDefragmentationStats are presumably optional (only pAllocations is
// asserted) — confirm against VmaAllocator_T::Defragment.
VkResult vmaDefragment(
    VmaAllocator allocator,
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo *pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    VMA_ASSERT(allocator && pAllocations);

    VMA_DEBUG_LOG("vmaDefragment");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
}
    14335 
// Binds the given buffer to the allocation's memory (analogous to
// vkBindBufferMemory, but using the allocation's memory handle and offset).
// NOTE: this call is not recorded even when VMA_RECORDING_ENABLED is set.
VkResult vmaBindBufferMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkBuffer buffer)
{
    VMA_ASSERT(allocator && allocation && buffer);

    VMA_DEBUG_LOG("vmaBindBufferMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindBufferMemory(allocation, buffer);
}
    14349 
// Binds the given image to the allocation's memory. Mirrors
// vmaBindBufferMemory for images.
VkResult vmaBindImageMemory(
    VmaAllocator allocator,
    VmaAllocation allocation,
    VkImage image)
{
    VMA_ASSERT(allocator && allocation && image);

    VMA_DEBUG_LOG("vmaBindImageMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    return allocator->BindImageMemory(allocation, image);
}
    14363 
// Creates a VkBuffer, allocates memory for it, and binds them together.
// On success returns VK_SUCCESS with *pBuffer and *pAllocation set (and
// *pAllocationInfo filled if non-null). On any partial failure, everything
// created so far is destroyed and both outputs are reset to VK_NULL_HANDLE.
VkResult vmaCreateBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufferCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);

    // Reject zero-size buffers up front instead of passing invalid input
    // on to the driver.
    if(pBufferCreateInfo->size == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateBuffer");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Outputs stay null unless every step below succeeds.
    *pBuffer = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkBuffer.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
        allocator->m_hDevice,
        pBufferCreateInfo,
        allocator->GetAllocationCallbacks(),
        pBuffer);
    if(res >= 0)
    {
        // 2. vkGetBufferMemoryRequirements.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        // Sanity checks (debug-only): alignment requirements for specific
        // buffer usages reported in Physical Device Properties must be
        // included in the alignment reported by memory requirements.
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
        }
        if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
        {
            VMA_ASSERT(vkMemReq.alignment %
                allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
        }

        // 3. Allocate memory using allocator.
        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            *pBuffer, // dedicatedBuffer
            VK_NULL_HANDLE, // dedicatedImage
            *pAllocationCreateInfo,
            VMA_SUBALLOCATION_TYPE_BUFFER,
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Recording happens after AllocateMemory regardless of its result.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateBuffer(
                allocator->GetCurrentFrameIndex(),
                *pBufferCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 4. Bind buffer with memory.
            res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the buffer.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
            *pBuffer = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the buffer.
        (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
        *pBuffer = VK_NULL_HANDLE;
        return res;
    }
    // vkCreateBuffer itself failed; nothing to clean up.
    return res;
}
    14470 
    14471 void vmaDestroyBuffer(
    14472  VmaAllocator allocator,
    14473  VkBuffer buffer,
    14474  VmaAllocation allocation)
    14475 {
    14476  VMA_ASSERT(allocator);
    14477 
    14478  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14479  {
    14480  return;
    14481  }
    14482 
    14483  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14484 
    14485  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14486 
    14487 #if VMA_RECORDING_ENABLED
    14488  if(allocator->GetRecorder() != VMA_NULL)
    14489  {
    14490  allocator->GetRecorder()->RecordDestroyBuffer(
    14491  allocator->GetCurrentFrameIndex(),
    14492  allocation);
    14493  }
    14494 #endif
    14495 
    14496  if(buffer != VK_NULL_HANDLE)
    14497  {
    14498  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14499  }
    14500 
    14501  if(allocation != VK_NULL_HANDLE)
    14502  {
    14503  allocator->FreeMemory(allocation);
    14504  }
    14505 }
    14506 
// Creates a VkImage, allocates memory for it, and binds them together —
// the image counterpart of vmaCreateBuffer. On success returns VK_SUCCESS
// with *pImage and *pAllocation set (and *pAllocationInfo filled if
// non-null). On any partial failure, everything created so far is
// destroyed and both outputs are reset to VK_NULL_HANDLE.
VkResult vmaCreateImage(
    VmaAllocator allocator,
    const VkImageCreateInfo* pImageCreateInfo,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    VkImage* pImage,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);

    // Reject degenerate images (zero extent, mip levels, or array layers)
    // up front instead of passing invalid input to the driver.
    if(pImageCreateInfo->extent.width == 0 ||
        pImageCreateInfo->extent.height == 0 ||
        pImageCreateInfo->extent.depth == 0 ||
        pImageCreateInfo->mipLevels == 0 ||
        pImageCreateInfo->arrayLayers == 0)
    {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VMA_DEBUG_LOG("vmaCreateImage");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    // Outputs stay null unless every step below succeeds.
    *pImage = VK_NULL_HANDLE;
    *pAllocation = VK_NULL_HANDLE;

    // 1. Create VkImage.
    VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
        allocator->m_hDevice,
        pImageCreateInfo,
        allocator->GetAllocationCallbacks(),
        pImage);
    if(res >= 0)
    {
        // Suballocation type depends on tiling: optimal-tiled and
        // linear-tiled images are tracked separately.
        VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
            VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
            VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;

        // 2. Allocate memory using allocator.
        VkMemoryRequirements vkMemReq = {};
        bool requiresDedicatedAllocation = false;
        bool prefersDedicatedAllocation = false;
        allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
            requiresDedicatedAllocation, prefersDedicatedAllocation);

        res = allocator->AllocateMemory(
            vkMemReq,
            requiresDedicatedAllocation,
            prefersDedicatedAllocation,
            VK_NULL_HANDLE, // dedicatedBuffer
            *pImage, // dedicatedImage
            *pAllocationCreateInfo,
            suballocType,
            pAllocation);

#if VMA_RECORDING_ENABLED
        // Recording happens after AllocateMemory regardless of its result.
        if(allocator->GetRecorder() != VMA_NULL)
        {
            allocator->GetRecorder()->RecordCreateImage(
                allocator->GetCurrentFrameIndex(),
                *pImageCreateInfo,
                *pAllocationCreateInfo,
                *pAllocation);
        }
#endif

        if(res >= 0)
        {
            // 3. Bind image with memory.
            res = allocator->BindImageMemory(*pAllocation, *pImage);
            if(res >= 0)
            {
                // All steps succeeded.
                #if VMA_STATS_STRING_ENABLED
                    (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
                #endif
                if(pAllocationInfo != VMA_NULL)
                {
                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
                }

                return VK_SUCCESS;
            }
            // Bind failed: roll back the allocation and the image.
            allocator->FreeMemory(*pAllocation);
            *pAllocation = VK_NULL_HANDLE;
            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
            *pImage = VK_NULL_HANDLE;
            return res;
        }
        // Allocation failed: roll back the image.
        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
        *pImage = VK_NULL_HANDLE;
        return res;
    }
    // vkCreateImage itself failed; nothing to clean up.
    return res;
}
    14602 
    14603 void vmaDestroyImage(
    14604  VmaAllocator allocator,
    14605  VkImage image,
    14606  VmaAllocation allocation)
    14607 {
    14608  VMA_ASSERT(allocator);
    14609 
    14610  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14611  {
    14612  return;
    14613  }
    14614 
    14615  VMA_DEBUG_LOG("vmaDestroyImage");
    14616 
    14617  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14618 
    14619 #if VMA_RECORDING_ENABLED
    14620  if(allocator->GetRecorder() != VMA_NULL)
    14621  {
    14622  allocator->GetRecorder()->RecordDestroyImage(
    14623  allocator->GetCurrentFrameIndex(),
    14624  allocation);
    14625  }
    14626 #endif
    14627 
    14628  if(image != VK_NULL_HANDLE)
    14629  {
    14630  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14631  }
    14632  if(allocation != VK_NULL_HANDLE)
    14633  {
    14634  allocator->FreeMemory(allocation);
    14635  }
    14636 }
    14637 
    14638 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1586
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1887
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1643
    @@ -73,26 +73,26 @@ $(function() {
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    Definition: vk_mem_alloc.h:1617
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2209
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2212
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1598
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1844
    Definition: vk_mem_alloc.h:1947
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1590
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2309
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2312
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1640
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2579
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2098
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2582
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2101
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1487
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2190
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2193
    Definition: vk_mem_alloc.h:1924
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1579
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:1997
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2000
    Definition: vk_mem_alloc.h:1871
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1652
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2126
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2129
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1705
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1637
    @@ -102,41 +102,41 @@ $(function() {
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1777
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1595
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1776
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2583
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2586
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1669
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1786
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2591
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1981
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2574
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2594
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1984
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2577
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1596
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1521
    Represents main object of this library initialized.
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1646
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2140
    -
    Definition: vk_mem_alloc.h:2134
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2143
    +
    Definition: vk_mem_alloc.h:2137
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1712
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2319
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2322
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1591
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1615
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2018
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2160
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2196
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2021
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2163
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2199
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1577
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2143
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2146
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1822
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2569
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2572
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2587
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2590
    Definition: vk_mem_alloc.h:1861
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2005
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2008
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1594
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    @@ -144,43 +144,43 @@ $(function() {
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1782
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1527
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    - +
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1548
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1619
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1553
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2589
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2592
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1992
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2206
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1995
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2209
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1587
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1765
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2155
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2158
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1540
    -
    Definition: vk_mem_alloc.h:2130
    +
    Definition: vk_mem_alloc.h:2133
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Definition: vk_mem_alloc.h:1931
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1778
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1544
    -
    Definition: vk_mem_alloc.h:1955
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2146
    +
    Definition: vk_mem_alloc.h:1958
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2149
    Definition: vk_mem_alloc.h:1870
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1593
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1987
    -
    Definition: vk_mem_alloc.h:1978
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1990
    +
    Definition: vk_mem_alloc.h:1981
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1768
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1589
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2168
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2171
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1655
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2199
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1976
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2011
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2202
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1979
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2014
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1693
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1784
    @@ -192,62 +192,62 @@ $(function() {
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1542
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1599
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2182
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2185
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1592
    Definition: vk_mem_alloc.h:1942
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1633
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2333
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2336
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1649
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1777
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1774
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2187
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2190
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions...
    Definition: vk_mem_alloc.h:1951
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2314
    -
    Definition: vk_mem_alloc.h:1962
    -
    Definition: vk_mem_alloc.h:1974
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2585
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2317
    +
    Definition: vk_mem_alloc.h:1965
    +
    Definition: vk_mem_alloc.h:1977
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2588
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1585
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1772
    Definition: vk_mem_alloc.h:1827
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2136
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2139
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1622
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1770
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1597
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1601
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1898
    -
    Definition: vk_mem_alloc.h:1969
    +
    Definition: vk_mem_alloc.h:1972
    Definition: vk_mem_alloc.h:1854
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2328
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2331
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1575
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1588
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2115
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2118
    VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
    Tries to resize an allocation in place, if there is enough free memory after it.
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2295
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2298
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1959
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2080
    +
    Definition: vk_mem_alloc.h:1962
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2083
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1778
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1609
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1785
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2193
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2196
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1778
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2300
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2303
    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index 96303b9..bd37171 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,189 +65,189 @@ $(function() {
    vk_mem_alloc.h
    -Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1479 /*
    1480 Define this macro to 0/1 to disable/enable support for recording functionality,
    1481 available through VmaAllocatorCreateInfo::pRecordSettings.
    1482 */
    1483 #ifndef VMA_RECORDING_ENABLED
    1484  #ifdef _WIN32
    1485  #define VMA_RECORDING_ENABLED 1
    1486  #else
    1487  #define VMA_RECORDING_ENABLED 0
    1488  #endif
    1489 #endif
    1490 
    1491 #ifndef NOMINMAX
    1492  #define NOMINMAX // For windows.h
    1493 #endif
    1494 
    1495 #include <vulkan/vulkan.h>
    1496 
    1497 #if VMA_RECORDING_ENABLED
    1498  #include <windows.h>
    1499 #endif
    1500 
    1501 #if !defined(VMA_DEDICATED_ALLOCATION)
    1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1503  #define VMA_DEDICATED_ALLOCATION 1
    1504  #else
    1505  #define VMA_DEDICATED_ALLOCATION 0
    1506  #endif
    1507 #endif
    1508 
    1518 VK_DEFINE_HANDLE(VmaAllocator)
    1519 
    1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1522  VmaAllocator allocator,
    1523  uint32_t memoryType,
    1524  VkDeviceMemory memory,
    1525  VkDeviceSize size);
    1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1528  VmaAllocator allocator,
    1529  uint32_t memoryType,
    1530  VkDeviceMemory memory,
    1531  VkDeviceSize size);
    1532 
    1546 
    1576 
    1579 typedef VkFlags VmaAllocatorCreateFlags;
    1580 
    1585 typedef struct VmaVulkanFunctions {
    1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1588  PFN_vkAllocateMemory vkAllocateMemory;
    1589  PFN_vkFreeMemory vkFreeMemory;
    1590  PFN_vkMapMemory vkMapMemory;
    1591  PFN_vkUnmapMemory vkUnmapMemory;
    1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1594  PFN_vkBindBufferMemory vkBindBufferMemory;
    1595  PFN_vkBindImageMemory vkBindImageMemory;
    1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1598  PFN_vkCreateBuffer vkCreateBuffer;
    1599  PFN_vkDestroyBuffer vkDestroyBuffer;
    1600  PFN_vkCreateImage vkCreateImage;
    1601  PFN_vkDestroyImage vkDestroyImage;
    1602 #if VMA_DEDICATED_ALLOCATION
    1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1605 #endif
    1607 
    1609 typedef enum VmaRecordFlagBits {
    1616 
    1619 typedef VkFlags VmaRecordFlags;
    1620 
    1622 typedef struct VmaRecordSettings
    1623 {
    1633  const char* pFilePath;
    1635 
    1638 {
    1642 
    1643  VkPhysicalDevice physicalDevice;
    1645 
    1646  VkDevice device;
    1648 
    1651 
    1652  const VkAllocationCallbacks* pAllocationCallbacks;
    1654 
    1693  const VkDeviceSize* pHeapSizeLimit;
    1714 
    1716 VkResult vmaCreateAllocator(
    1717  const VmaAllocatorCreateInfo* pCreateInfo,
    1718  VmaAllocator* pAllocator);
    1719 
    1721 void vmaDestroyAllocator(
    1722  VmaAllocator allocator);
    1723 
    1729  VmaAllocator allocator,
    1730  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1731 
    1737  VmaAllocator allocator,
    1738  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1739 
    1747  VmaAllocator allocator,
    1748  uint32_t memoryTypeIndex,
    1749  VkMemoryPropertyFlags* pFlags);
    1750 
    1760  VmaAllocator allocator,
    1761  uint32_t frameIndex);
    1762 
    1765 typedef struct VmaStatInfo
    1766 {
    1768  uint32_t blockCount;
    1774  VkDeviceSize usedBytes;
    1776  VkDeviceSize unusedBytes;
    1779 } VmaStatInfo;
    1780 
    1782 typedef struct VmaStats
    1783 {
    1784  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1785  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1787 } VmaStats;
    1788 
    1790 void vmaCalculateStats(
    1791  VmaAllocator allocator,
    1792  VmaStats* pStats);
    1793 
    1794 #define VMA_STATS_STRING_ENABLED 1
    1795 
    1796 #if VMA_STATS_STRING_ENABLED
    1797 
    1799 
    1801 void vmaBuildStatsString(
    1802  VmaAllocator allocator,
    1803  char** ppStatsString,
    1804  VkBool32 detailedMap);
    1805 
    1806 void vmaFreeStatsString(
    1807  VmaAllocator allocator,
    1808  char* pStatsString);
    1809 
    1810 #endif // #if VMA_STATS_STRING_ENABLED
    1811 
    1820 VK_DEFINE_HANDLE(VmaPool)
    1821 
    1822 typedef enum VmaMemoryUsage
    1823 {
    1872 } VmaMemoryUsage;
    1873 
    1888 
    1943 
    1959 
    1969 
    1976 
    1980 
    1982 {
    1995  VkMemoryPropertyFlags requiredFlags;
    2000  VkMemoryPropertyFlags preferredFlags;
    2008  uint32_t memoryTypeBits;
    2021  void* pUserData;
    2023 
    2040 VkResult vmaFindMemoryTypeIndex(
    2041  VmaAllocator allocator,
    2042  uint32_t memoryTypeBits,
    2043  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2044  uint32_t* pMemoryTypeIndex);
    2045 
    2059  VmaAllocator allocator,
    2060  const VkBufferCreateInfo* pBufferCreateInfo,
    2061  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2062  uint32_t* pMemoryTypeIndex);
    2063 
    2077  VmaAllocator allocator,
    2078  const VkImageCreateInfo* pImageCreateInfo,
    2079  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2080  uint32_t* pMemoryTypeIndex);
    2081 
    2102 
    2119 
    2130 
    2136 
    2139 typedef VkFlags VmaPoolCreateFlags;
    2140 
    2143 typedef struct VmaPoolCreateInfo {
    2158  VkDeviceSize blockSize;
    2187 
    2190 typedef struct VmaPoolStats {
    2193  VkDeviceSize size;
    2196  VkDeviceSize unusedSize;
    2209  VkDeviceSize unusedRangeSizeMax;
    2212  size_t blockCount;
    2213 } VmaPoolStats;
    2214 
    2221 VkResult vmaCreatePool(
    2222  VmaAllocator allocator,
    2223  const VmaPoolCreateInfo* pCreateInfo,
    2224  VmaPool* pPool);
    2225 
    2228 void vmaDestroyPool(
    2229  VmaAllocator allocator,
    2230  VmaPool pool);
    2231 
    2238 void vmaGetPoolStats(
    2239  VmaAllocator allocator,
    2240  VmaPool pool,
    2241  VmaPoolStats* pPoolStats);
    2242 
    2250  VmaAllocator allocator,
    2251  VmaPool pool,
    2252  size_t* pLostAllocationCount);
    2253 
    2268 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2269 
    2294 VK_DEFINE_HANDLE(VmaAllocation)
    2295 
    2296 
    2298 typedef struct VmaAllocationInfo {
    2303  uint32_t memoryType;
    2312  VkDeviceMemory deviceMemory;
    2317  VkDeviceSize offset;
    2322  VkDeviceSize size;
    2336  void* pUserData;
    2338 
    2349 VkResult vmaAllocateMemory(
    2350  VmaAllocator allocator,
    2351  const VkMemoryRequirements* pVkMemoryRequirements,
    2352  const VmaAllocationCreateInfo* pCreateInfo,
    2353  VmaAllocation* pAllocation,
    2354  VmaAllocationInfo* pAllocationInfo);
    2355 
    2363  VmaAllocator allocator,
    2364  VkBuffer buffer,
    2365  const VmaAllocationCreateInfo* pCreateInfo,
    2366  VmaAllocation* pAllocation,
    2367  VmaAllocationInfo* pAllocationInfo);
    2368 
    2370 VkResult vmaAllocateMemoryForImage(
    2371  VmaAllocator allocator,
    2372  VkImage image,
    2373  const VmaAllocationCreateInfo* pCreateInfo,
    2374  VmaAllocation* pAllocation,
    2375  VmaAllocationInfo* pAllocationInfo);
    2376 
    2378 void vmaFreeMemory(
    2379  VmaAllocator allocator,
    2380  VmaAllocation allocation);
    2381 
    2402 VkResult vmaResizeAllocation(
    2403  VmaAllocator allocator,
    2404  VmaAllocation allocation,
    2405  VkDeviceSize newSize);
    2406 
    2424  VmaAllocator allocator,
    2425  VmaAllocation allocation,
    2426  VmaAllocationInfo* pAllocationInfo);
    2427 
    2442 VkBool32 vmaTouchAllocation(
    2443  VmaAllocator allocator,
    2444  VmaAllocation allocation);
    2445 
    2460  VmaAllocator allocator,
    2461  VmaAllocation allocation,
    2462  void* pUserData);
    2463 
    2475  VmaAllocator allocator,
    2476  VmaAllocation* pAllocation);
    2477 
    2512 VkResult vmaMapMemory(
    2513  VmaAllocator allocator,
    2514  VmaAllocation allocation,
    2515  void** ppData);
    2516 
    2521 void vmaUnmapMemory(
    2522  VmaAllocator allocator,
    2523  VmaAllocation allocation);
    2524 
    2537 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2538 
    2551 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2552 
    2569 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2570 
    2572 typedef struct VmaDefragmentationInfo {
    2577  VkDeviceSize maxBytesToMove;
    2584 
    2586 typedef struct VmaDefragmentationStats {
    2588  VkDeviceSize bytesMoved;
    2590  VkDeviceSize bytesFreed;
    2596 
    2635 VkResult vmaDefragment(
    2636  VmaAllocator allocator,
    2637  VmaAllocation* pAllocations,
    2638  size_t allocationCount,
    2639  VkBool32* pAllocationsChanged,
    2640  const VmaDefragmentationInfo *pDefragmentationInfo,
    2641  VmaDefragmentationStats* pDefragmentationStats);
    2642 
    2655 VkResult vmaBindBufferMemory(
    2656  VmaAllocator allocator,
    2657  VmaAllocation allocation,
    2658  VkBuffer buffer);
    2659 
    2672 VkResult vmaBindImageMemory(
    2673  VmaAllocator allocator,
    2674  VmaAllocation allocation,
    2675  VkImage image);
    2676 
    2703 VkResult vmaCreateBuffer(
    2704  VmaAllocator allocator,
    2705  const VkBufferCreateInfo* pBufferCreateInfo,
    2706  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2707  VkBuffer* pBuffer,
    2708  VmaAllocation* pAllocation,
    2709  VmaAllocationInfo* pAllocationInfo);
    2710 
    2722 void vmaDestroyBuffer(
    2723  VmaAllocator allocator,
    2724  VkBuffer buffer,
    2725  VmaAllocation allocation);
    2726 
    2728 VkResult vmaCreateImage(
    2729  VmaAllocator allocator,
    2730  const VkImageCreateInfo* pImageCreateInfo,
    2731  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2732  VkImage* pImage,
    2733  VmaAllocation* pAllocation,
    2734  VmaAllocationInfo* pAllocationInfo);
    2735 
    2747 void vmaDestroyImage(
    2748  VmaAllocator allocator,
    2749  VkImage image,
    2750  VmaAllocation allocation);
    2751 
    2752 #ifdef __cplusplus
    2753 }
    2754 #endif
    2755 
    2756 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2757 
    2758 // For Visual Studio IntelliSense.
    2759 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2760 #define VMA_IMPLEMENTATION
    2761 #endif
    2762 
    2763 #ifdef VMA_IMPLEMENTATION
    2764 #undef VMA_IMPLEMENTATION
    2765 
    2766 #include <cstdint>
    2767 #include <cstdlib>
    2768 #include <cstring>
    2769 
    2770 /*******************************************************************************
    2771 CONFIGURATION SECTION
    2772 
    2773 Define some of these macros before each #include of this header or change them
2774 here if you need other than default behavior depending on your environment.
    2775 */
    2776 
    2777 /*
    2778 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2779 internally, like:
    2780 
    2781  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2782 
2783 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    2784 VmaAllocatorCreateInfo::pVulkanFunctions.
    2785 */
    2786 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2787 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2788 #endif
    2789 
    2790 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2791 //#define VMA_USE_STL_CONTAINERS 1
    2792 
    2793 /* Set this macro to 1 to make the library including and using STL containers:
    2794 std::pair, std::vector, std::list, std::unordered_map.
    2795 
    2796 Set it to 0 or undefined to make the library using its own implementation of
    2797 the containers.
    2798 */
    2799 #if VMA_USE_STL_CONTAINERS
    2800  #define VMA_USE_STL_VECTOR 1
    2801  #define VMA_USE_STL_UNORDERED_MAP 1
    2802  #define VMA_USE_STL_LIST 1
    2803 #endif
    2804 
    2805 #if VMA_USE_STL_VECTOR
    2806  #include <vector>
    2807 #endif
    2808 
    2809 #if VMA_USE_STL_UNORDERED_MAP
    2810  #include <unordered_map>
    2811 #endif
    2812 
    2813 #if VMA_USE_STL_LIST
    2814  #include <list>
    2815 #endif
    2816 
    2817 /*
    2818 Following headers are used in this CONFIGURATION section only, so feel free to
    2819 remove them if not needed.
    2820 */
    2821 #include <cassert> // for assert
    2822 #include <algorithm> // for min, max
    2823 #include <mutex> // for std::mutex
    2824 #include <atomic> // for std::atomic
    2825 
    2826 #ifndef VMA_NULL
    2827  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2828  #define VMA_NULL nullptr
    2829 #endif
    2830 
    2831 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2832 #include <cstdlib>
    2833 void *aligned_alloc(size_t alignment, size_t size)
    2834 {
    2835  // alignment must be >= sizeof(void*)
    2836  if(alignment < sizeof(void*))
    2837  {
    2838  alignment = sizeof(void*);
    2839  }
    2840 
    2841  return memalign(alignment, size);
    2842 }
    2843 #elif defined(__APPLE__) || defined(__ANDROID__)
    2844 #include <cstdlib>
    2845 void *aligned_alloc(size_t alignment, size_t size)
    2846 {
    2847  // alignment must be >= sizeof(void*)
    2848  if(alignment < sizeof(void*))
    2849  {
    2850  alignment = sizeof(void*);
    2851  }
    2852 
    2853  void *pointer;
    2854  if(posix_memalign(&pointer, alignment, size) == 0)
    2855  return pointer;
    2856  return VMA_NULL;
    2857 }
    2858 #endif
    2859 
    2860 // If your compiler is not compatible with C++11 and definition of
2861 // aligned_alloc() function is missing, uncommenting the following line may help:
    2862 
    2863 //#include <malloc.h>
    2864 
    2865 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2866 #ifndef VMA_ASSERT
    2867  #ifdef _DEBUG
    2868  #define VMA_ASSERT(expr) assert(expr)
    2869  #else
    2870  #define VMA_ASSERT(expr)
    2871  #endif
    2872 #endif
    2873 
    2874 // Assert that will be called very often, like inside data structures e.g. operator[].
    2875 // Making it non-empty can make program slow.
    2876 #ifndef VMA_HEAVY_ASSERT
    2877  #ifdef _DEBUG
    2878  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2879  #else
    2880  #define VMA_HEAVY_ASSERT(expr)
    2881  #endif
    2882 #endif
    2883 
    2884 #ifndef VMA_ALIGN_OF
    2885  #define VMA_ALIGN_OF(type) (__alignof(type))
    2886 #endif
    2887 
    2888 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2889  #if defined(_WIN32)
    2890  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2891  #else
    2892  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2893  #endif
    2894 #endif
    2895 
    2896 #ifndef VMA_SYSTEM_FREE
    2897  #if defined(_WIN32)
    2898  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2899  #else
    2900  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2901  #endif
    2902 #endif
    2903 
    2904 #ifndef VMA_MIN
    2905  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2906 #endif
    2907 
    2908 #ifndef VMA_MAX
    2909  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2910 #endif
    2911 
    2912 #ifndef VMA_SWAP
    2913  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2914 #endif
    2915 
    2916 #ifndef VMA_SORT
    2917  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2918 #endif
    2919 
    2920 #ifndef VMA_DEBUG_LOG
    2921  #define VMA_DEBUG_LOG(format, ...)
    2922  /*
    2923  #define VMA_DEBUG_LOG(format, ...) do { \
    2924  printf(format, __VA_ARGS__); \
    2925  printf("\n"); \
    2926  } while(false)
    2927  */
    2928 #endif
    2929 
    2930 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2931 #if VMA_STATS_STRING_ENABLED
    2932  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2933  {
    2934  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2935  }
    2936  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2937  {
    2938  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2939  }
    2940  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2941  {
    2942  snprintf(outStr, strLen, "%p", ptr);
    2943  }
    2944 #endif
    2945 
    2946 #ifndef VMA_MUTEX
    2947  class VmaMutex
    2948  {
    2949  public:
    2950  VmaMutex() { }
    2951  ~VmaMutex() { }
    2952  void Lock() { m_Mutex.lock(); }
    2953  void Unlock() { m_Mutex.unlock(); }
    2954  private:
    2955  std::mutex m_Mutex;
    2956  };
    2957  #define VMA_MUTEX VmaMutex
    2958 #endif
    2959 
    2960 /*
    2961 If providing your own implementation, you need to implement a subset of std::atomic:
    2962 
    2963 - Constructor(uint32_t desired)
    2964 - uint32_t load() const
    2965 - void store(uint32_t desired)
    2966 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2967 */
    2968 #ifndef VMA_ATOMIC_UINT32
    2969  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2970 #endif
    2971 
    2972 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2973 
    2977  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2978 #endif
    2979 
    2980 #ifndef VMA_DEBUG_ALIGNMENT
    2981 
    2985  #define VMA_DEBUG_ALIGNMENT (1)
    2986 #endif
    2987 
    2988 #ifndef VMA_DEBUG_MARGIN
    2989 
    2993  #define VMA_DEBUG_MARGIN (0)
    2994 #endif
    2995 
    2996 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2997 
    3001  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3002 #endif
    3003 
    3004 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3005 
    3010  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3011 #endif
    3012 
    3013 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3014 
    3018  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3019 #endif
    3020 
    3021 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3022 
    3026  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3027 #endif
    3028 
    3029 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3030  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3032 #endif
    3033 
    3034 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3035  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3037 #endif
    3038 
    3039 #ifndef VMA_CLASS_NO_COPY
    3040  #define VMA_CLASS_NO_COPY(className) \
    3041  private: \
    3042  className(const className&) = delete; \
    3043  className& operator=(const className&) = delete;
    3044 #endif
    3045 
    3046 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3047 
    3048 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3049 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3050 
    3051 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3052 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3053 
    3054 /*******************************************************************************
    3055 END OF CONFIGURATION
    3056 */
    3057 
    3058 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3059  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3060 
    3061 // Returns number of bits set to 1 in (v).
    3062 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3063 {
    3064  uint32_t c = v - ((v >> 1) & 0x55555555);
    3065  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3066  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3067  c = ((c >> 8) + c) & 0x00FF00FF;
    3068  c = ((c >> 16) + c) & 0x0000FFFF;
    3069  return c;
    3070 }
    3071 
    3072 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3073 // Use types like uint32_t, uint64_t as T.
    3074 template <typename T>
    3075 static inline T VmaAlignUp(T val, T align)
    3076 {
    3077  return (val + align - 1) / align * align;
    3078 }
    3079 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3080 // Use types like uint32_t, uint64_t as T.
    3081 template <typename T>
    3082 static inline T VmaAlignDown(T val, T align)
    3083 {
    3084  return val / align * align;
    3085 }
    3086 
    3087 // Division with mathematical rounding to nearest number.
    3088 template <typename T>
    3089 static inline T VmaRoundDiv(T x, T y)
    3090 {
    3091  return (x + (y / (T)2)) / y;
    3092 }
    3093 
    3094 /*
    3095 Returns true if given number is a power of two.
    3096 T must be unsigned integer number or signed integer but always nonnegative.
    3097 For 0 returns true.
    3098 */
    3099 template <typename T>
    3100 inline bool VmaIsPow2(T x)
    3101 {
    3102  return (x & (x-1)) == 0;
    3103 }
    3104 
    3105 // Returns smallest power of 2 greater or equal to v.
    3106 static inline uint32_t VmaNextPow2(uint32_t v)
    3107 {
    3108  v--;
    3109  v |= v >> 1;
    3110  v |= v >> 2;
    3111  v |= v >> 4;
    3112  v |= v >> 8;
    3113  v |= v >> 16;
    3114  v++;
    3115  return v;
    3116 }
    3117 static inline uint64_t VmaNextPow2(uint64_t v)
    3118 {
    3119  v--;
    3120  v |= v >> 1;
    3121  v |= v >> 2;
    3122  v |= v >> 4;
    3123  v |= v >> 8;
    3124  v |= v >> 16;
    3125  v |= v >> 32;
    3126  v++;
    3127  return v;
    3128 }
    3129 
    3130 // Returns largest power of 2 less or equal to v.
    3131 static inline uint32_t VmaPrevPow2(uint32_t v)
    3132 {
    3133  v |= v >> 1;
    3134  v |= v >> 2;
    3135  v |= v >> 4;
    3136  v |= v >> 8;
    3137  v |= v >> 16;
    3138  v = v ^ (v >> 1);
    3139  return v;
    3140 }
    3141 static inline uint64_t VmaPrevPow2(uint64_t v)
    3142 {
    3143  v |= v >> 1;
    3144  v |= v >> 2;
    3145  v |= v >> 4;
    3146  v |= v >> 8;
    3147  v |= v >> 16;
    3148  v |= v >> 32;
    3149  v = v ^ (v >> 1);
    3150  return v;
    3151 }
    3152 
    3153 static inline bool VmaStrIsEmpty(const char* pStr)
    3154 {
    3155  return pStr == VMA_NULL || *pStr == '\0';
    3156 }
    3157 
    3158 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3159 {
    3160  switch(algorithm)
    3161  {
    3163  return "Linear";
    3165  return "Buddy";
    3166  case 0:
    3167  return "Default";
    3168  default:
    3169  VMA_ASSERT(0);
    3170  return "";
    3171  }
    3172 }
    3173 
    3174 #ifndef VMA_SORT
    3175 
    3176 template<typename Iterator, typename Compare>
    3177 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3178 {
    3179  Iterator centerValue = end; --centerValue;
    3180  Iterator insertIndex = beg;
    3181  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3182  {
    3183  if(cmp(*memTypeIndex, *centerValue))
    3184  {
    3185  if(insertIndex != memTypeIndex)
    3186  {
    3187  VMA_SWAP(*memTypeIndex, *insertIndex);
    3188  }
    3189  ++insertIndex;
    3190  }
    3191  }
    3192  if(insertIndex != centerValue)
    3193  {
    3194  VMA_SWAP(*insertIndex, *centerValue);
    3195  }
    3196  return insertIndex;
    3197 }
    3198 
    3199 template<typename Iterator, typename Compare>
    3200 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3201 {
    3202  if(beg < end)
    3203  {
    3204  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3205  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3206  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3207  }
    3208 }
    3209 
    3210 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3211 
    3212 #endif // #ifndef VMA_SORT
    3213 
    3214 /*
    3215 Returns true if two memory blocks occupy overlapping pages.
    3216 ResourceA must be in less memory offset than ResourceB.
    3217 
    3218 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3219 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3220 */
    3221 static inline bool VmaBlocksOnSamePage(
    3222  VkDeviceSize resourceAOffset,
    3223  VkDeviceSize resourceASize,
    3224  VkDeviceSize resourceBOffset,
    3225  VkDeviceSize pageSize)
    3226 {
    3227  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3228  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3229  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3230  VkDeviceSize resourceBStart = resourceBOffset;
    3231  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3232  return resourceAEndPage == resourceBStartPage;
    3233 }
    3234 
    3235 enum VmaSuballocationType
    3236 {
    3237  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3238  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3239  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3240  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3241  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3242  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3243  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3244 };
    3245 
    3246 /*
    3247 Returns true if given suballocation types could conflict and must respect
    3248 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3249 or linear image and another one is optimal image. If type is unknown, behave
    3250 conservatively.
    3251 */
    3252 static inline bool VmaIsBufferImageGranularityConflict(
    3253  VmaSuballocationType suballocType1,
    3254  VmaSuballocationType suballocType2)
    3255 {
    3256  if(suballocType1 > suballocType2)
    3257  {
    3258  VMA_SWAP(suballocType1, suballocType2);
    3259  }
    3260 
    3261  switch(suballocType1)
    3262  {
    3263  case VMA_SUBALLOCATION_TYPE_FREE:
    3264  return false;
    3265  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3266  return true;
    3267  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3268  return
    3269  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3271  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3272  return
    3273  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3274  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3276  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3277  return
    3278  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3279  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3280  return false;
    3281  default:
    3282  VMA_ASSERT(0);
    3283  return true;
    3284  }
    3285 }
    3286 
    3287 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3288 {
    3289  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3290  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3291  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3292  {
    3293  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3294  }
    3295 }
    3296 
    3297 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3298 {
    3299  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3300  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3301  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3302  {
    3303  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3304  {
    3305  return false;
    3306  }
    3307  }
    3308  return true;
    3309 }
    3310 
    3311 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3312 struct VmaMutexLock
    3313 {
    3314  VMA_CLASS_NO_COPY(VmaMutexLock)
    3315 public:
    3316  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3317  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3318  {
    3319  if(m_pMutex)
    3320  {
    3321  m_pMutex->Lock();
    3322  }
    3323  }
    3324 
    3325  ~VmaMutexLock()
    3326  {
    3327  if(m_pMutex)
    3328  {
    3329  m_pMutex->Unlock();
    3330  }
    3331  }
    3332 
    3333 private:
    3334  VMA_MUTEX* m_pMutex;
    3335 };
    3336 
    3337 #if VMA_DEBUG_GLOBAL_MUTEX
    3338  static VMA_MUTEX gDebugGlobalMutex;
    3339  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3340 #else
    3341  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3342 #endif
    3343 
    3344 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3345 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3346 
    3347 /*
    3348 Performs binary search and returns iterator to first element that is greater or
    3349 equal to (key), according to comparison (cmp).
    3350 
    3351 Cmp should return true if first argument is less than second argument.
    3352 
    3353 Returned value is the found element, if present in the collection or place where
    3354 new element with value (key) should be inserted.
    3355 */
    3356 template <typename CmpLess, typename IterT, typename KeyT>
    3357 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3358 {
    3359  size_t down = 0, up = (end - beg);
    3360  while(down < up)
    3361  {
    3362  const size_t mid = (down + up) / 2;
    3363  if(cmp(*(beg+mid), key))
    3364  {
    3365  down = mid + 1;
    3366  }
    3367  else
    3368  {
    3369  up = mid;
    3370  }
    3371  }
    3372  return beg + down;
    3373 }
    3374 
    3376 // Memory allocation
    3377 
    3378 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3379 {
    3380  if((pAllocationCallbacks != VMA_NULL) &&
    3381  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3382  {
    3383  return (*pAllocationCallbacks->pfnAllocation)(
    3384  pAllocationCallbacks->pUserData,
    3385  size,
    3386  alignment,
    3387  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3388  }
    3389  else
    3390  {
    3391  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3392  }
    3393 }
    3394 
    3395 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3396 {
    3397  if((pAllocationCallbacks != VMA_NULL) &&
    3398  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3399  {
    3400  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3401  }
    3402  else
    3403  {
    3404  VMA_SYSTEM_FREE(ptr);
    3405  }
    3406 }
    3407 
    3408 template<typename T>
    3409 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3410 {
    3411  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3412 }
    3413 
    3414 template<typename T>
    3415 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3416 {
    3417  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3418 }
    3419 
    3420 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3421 
    3422 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3423 
    3424 template<typename T>
    3425 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3426 {
    3427  ptr->~T();
    3428  VmaFree(pAllocationCallbacks, ptr);
    3429 }
    3430 
    3431 template<typename T>
    3432 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3433 {
    3434  if(ptr != VMA_NULL)
    3435  {
    3436  for(size_t i = count; i--; )
    3437  {
    3438  ptr[i].~T();
    3439  }
    3440  VmaFree(pAllocationCallbacks, ptr);
    3441  }
    3442 }
    3443 
    3444 // STL-compatible allocator.
    3445 template<typename T>
    3446 class VmaStlAllocator
    3447 {
    3448 public:
    3449  const VkAllocationCallbacks* const m_pCallbacks;
    3450  typedef T value_type;
    3451 
    3452  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3453  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3454 
    3455  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3456  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3457 
    3458  template<typename U>
    3459  bool operator==(const VmaStlAllocator<U>& rhs) const
    3460  {
    3461  return m_pCallbacks == rhs.m_pCallbacks;
    3462  }
    3463  template<typename U>
    3464  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3465  {
    3466  return m_pCallbacks != rhs.m_pCallbacks;
    3467  }
    3468 
    3469  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3470 };
    3471 
    3472 #if VMA_USE_STL_VECTOR
    3473 
    3474 #define VmaVector std::vector
    3475 
    3476 template<typename T, typename allocatorT>
    3477 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3478 {
    3479  vec.insert(vec.begin() + index, item);
    3480 }
    3481 
    3482 template<typename T, typename allocatorT>
    3483 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3484 {
    3485  vec.erase(vec.begin() + index);
    3486 }
    3487 
    3488 #else // #if VMA_USE_STL_VECTOR
    3489 
    3490 /* Class with interface compatible with subset of std::vector.
    3491 T must be POD because constructors and destructors are not called and memcpy is
    3492 used for these objects. */
    3493 template<typename T, typename AllocatorT>
    3494 class VmaVector
    3495 {
    3496 public:
    3497  typedef T value_type;
    3498 
    3499  VmaVector(const AllocatorT& allocator) :
    3500  m_Allocator(allocator),
    3501  m_pArray(VMA_NULL),
    3502  m_Count(0),
    3503  m_Capacity(0)
    3504  {
    3505  }
    3506 
    3507  VmaVector(size_t count, const AllocatorT& allocator) :
    3508  m_Allocator(allocator),
    3509  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3510  m_Count(count),
    3511  m_Capacity(count)
    3512  {
    3513  }
    3514 
    3515  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3516  m_Allocator(src.m_Allocator),
    3517  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3518  m_Count(src.m_Count),
    3519  m_Capacity(src.m_Count)
    3520  {
    3521  if(m_Count != 0)
    3522  {
    3523  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3524  }
    3525  }
    3526 
    3527  ~VmaVector()
    3528  {
    3529  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3530  }
    3531 
    3532  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3533  {
    3534  if(&rhs != this)
    3535  {
    3536  resize(rhs.m_Count);
    3537  if(m_Count != 0)
    3538  {
    3539  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3540  }
    3541  }
    3542  return *this;
    3543  }
    3544 
    3545  bool empty() const { return m_Count == 0; }
    3546  size_t size() const { return m_Count; }
    3547  T* data() { return m_pArray; }
    3548  const T* data() const { return m_pArray; }
    3549 
    3550  T& operator[](size_t index)
    3551  {
    3552  VMA_HEAVY_ASSERT(index < m_Count);
    3553  return m_pArray[index];
    3554  }
    3555  const T& operator[](size_t index) const
    3556  {
    3557  VMA_HEAVY_ASSERT(index < m_Count);
    3558  return m_pArray[index];
    3559  }
    3560 
    3561  T& front()
    3562  {
    3563  VMA_HEAVY_ASSERT(m_Count > 0);
    3564  return m_pArray[0];
    3565  }
    3566  const T& front() const
    3567  {
    3568  VMA_HEAVY_ASSERT(m_Count > 0);
    3569  return m_pArray[0];
    3570  }
    3571  T& back()
    3572  {
    3573  VMA_HEAVY_ASSERT(m_Count > 0);
    3574  return m_pArray[m_Count - 1];
    3575  }
    3576  const T& back() const
    3577  {
    3578  VMA_HEAVY_ASSERT(m_Count > 0);
    3579  return m_pArray[m_Count - 1];
    3580  }
    3581 
    3582  void reserve(size_t newCapacity, bool freeMemory = false)
    3583  {
    3584  newCapacity = VMA_MAX(newCapacity, m_Count);
    3585 
    3586  if((newCapacity < m_Capacity) && !freeMemory)
    3587  {
    3588  newCapacity = m_Capacity;
    3589  }
    3590 
    3591  if(newCapacity != m_Capacity)
    3592  {
    3593  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3594  if(m_Count != 0)
    3595  {
    3596  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3597  }
    3598  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3599  m_Capacity = newCapacity;
    3600  m_pArray = newArray;
    3601  }
    3602  }
    3603 
    3604  void resize(size_t newCount, bool freeMemory = false)
    3605  {
    3606  size_t newCapacity = m_Capacity;
    3607  if(newCount > m_Capacity)
    3608  {
    3609  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3610  }
    3611  else if(freeMemory)
    3612  {
    3613  newCapacity = newCount;
    3614  }
    3615 
    3616  if(newCapacity != m_Capacity)
    3617  {
    3618  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3619  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3620  if(elementsToCopy != 0)
    3621  {
    3622  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3623  }
    3624  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3625  m_Capacity = newCapacity;
    3626  m_pArray = newArray;
    3627  }
    3628 
    3629  m_Count = newCount;
    3630  }
    3631 
    3632  void clear(bool freeMemory = false)
    3633  {
    3634  resize(0, freeMemory);
    3635  }
    3636 
    3637  void insert(size_t index, const T& src)
    3638  {
    3639  VMA_HEAVY_ASSERT(index <= m_Count);
    3640  const size_t oldCount = size();
    3641  resize(oldCount + 1);
    3642  if(index < oldCount)
    3643  {
    3644  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3645  }
    3646  m_pArray[index] = src;
    3647  }
    3648 
    3649  void remove(size_t index)
    3650  {
    3651  VMA_HEAVY_ASSERT(index < m_Count);
    3652  const size_t oldCount = size();
    3653  if(index < oldCount - 1)
    3654  {
    3655  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3656  }
    3657  resize(oldCount - 1);
    3658  }
    3659 
    3660  void push_back(const T& src)
    3661  {
    3662  const size_t newIndex = size();
    3663  resize(newIndex + 1);
    3664  m_pArray[newIndex] = src;
    3665  }
    3666 
    3667  void pop_back()
    3668  {
    3669  VMA_HEAVY_ASSERT(m_Count > 0);
    3670  resize(size() - 1);
    3671  }
    3672 
    3673  void push_front(const T& src)
    3674  {
    3675  insert(0, src);
    3676  }
    3677 
    3678  void pop_front()
    3679  {
    3680  VMA_HEAVY_ASSERT(m_Count > 0);
    3681  remove(0);
    3682  }
    3683 
    3684  typedef T* iterator;
    3685 
    3686  iterator begin() { return m_pArray; }
    3687  iterator end() { return m_pArray + m_Count; }
    3688 
    3689 private:
    3690  AllocatorT m_Allocator;
    3691  T* m_pArray;
    3692  size_t m_Count;
    3693  size_t m_Capacity;
    3694 };
    3695 
    3696 template<typename T, typename allocatorT>
    3697 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    3698 {
    3699  vec.insert(index, item);
    3700 }
    3701 
    3702 template<typename T, typename allocatorT>
    3703 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    3704 {
    3705  vec.remove(index);
    3706 }
    3707 
    3708 #endif // #if VMA_USE_STL_VECTOR
    3709 
    3710 template<typename CmpLess, typename VectorT>
    3711 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3712 {
    3713  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3714  vector.data(),
    3715  vector.data() + vector.size(),
    3716  value,
    3717  CmpLess()) - vector.data();
    3718  VmaVectorInsert(vector, indexToInsert, value);
    3719  return indexToInsert;
    3720 }
    3721 
    3722 template<typename CmpLess, typename VectorT>
    3723 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3724 {
    3725  CmpLess comparator;
    3726  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3727  vector.begin(),
    3728  vector.end(),
    3729  value,
    3730  comparator);
    3731  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3732  {
    3733  size_t indexToRemove = it - vector.begin();
    3734  VmaVectorRemove(vector, indexToRemove);
    3735  return true;
    3736  }
    3737  return false;
    3738 }
    3739 
    3740 template<typename CmpLess, typename IterT, typename KeyT>
    3741 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3742 {
    3743  CmpLess comparator;
    3744  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3745  beg, end, value, comparator);
    3746  if(it == end ||
    3747  (!comparator(*it, value) && !comparator(value, *it)))
    3748  {
    3749  return it;
    3750  }
    3751  return end;
    3752 }
    3753 
    3755 // class VmaPoolAllocator
    3756 
    3757 /*
    3758 Allocator for objects of type T using a list of arrays (pools) to speed up
    3759 allocation. Number of elements that can be allocated is not bounded because
    3760 allocator can create multiple blocks.
    3761 */
    3762 template<typename T>
    3763 class VmaPoolAllocator
    3764 {
    3765  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    3766 public:
    3767  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    3768  ~VmaPoolAllocator();
    3769  void Clear();
    3770  T* Alloc();
    3771  void Free(T* ptr);
    3772 
    3773 private:
    3774  union Item
    3775  {
    3776  uint32_t NextFreeIndex;
    3777  T Value;
    3778  };
    3779 
    3780  struct ItemBlock
    3781  {
    3782  Item* pItems;
    3783  uint32_t FirstFreeIndex;
    3784  };
    3785 
    3786  const VkAllocationCallbacks* m_pAllocationCallbacks;
    3787  size_t m_ItemsPerBlock;
    3788  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    3789 
    3790  ItemBlock& CreateNewBlock();
    3791 };
    3792 
    3793 template<typename T>
    3794 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    3795  m_pAllocationCallbacks(pAllocationCallbacks),
    3796  m_ItemsPerBlock(itemsPerBlock),
    3797  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    3798 {
    3799  VMA_ASSERT(itemsPerBlock > 0);
    3800 }
    3801 
    3802 template<typename T>
    3803 VmaPoolAllocator<T>::~VmaPoolAllocator()
    3804 {
    3805  Clear();
    3806 }
    3807 
    3808 template<typename T>
    3809 void VmaPoolAllocator<T>::Clear()
    3810 {
    3811  for(size_t i = m_ItemBlocks.size(); i--; )
    3812  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3813  m_ItemBlocks.clear();
    3814 }
    3815 
    3816 template<typename T>
    3817 T* VmaPoolAllocator<T>::Alloc()
    3818 {
    3819  for(size_t i = m_ItemBlocks.size(); i--; )
    3820  {
    3821  ItemBlock& block = m_ItemBlocks[i];
    3822  // This block has some free items: Use first one.
    3823  if(block.FirstFreeIndex != UINT32_MAX)
    3824  {
    3825  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3826  block.FirstFreeIndex = pItem->NextFreeIndex;
    3827  return &pItem->Value;
    3828  }
    3829  }
    3830 
    3831  // No block has free item: Create new one and use it.
    3832  ItemBlock& newBlock = CreateNewBlock();
    3833  Item* const pItem = &newBlock.pItems[0];
    3834  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3835  return &pItem->Value;
    3836 }
    3837 
    3838 template<typename T>
    3839 void VmaPoolAllocator<T>::Free(T* ptr)
    3840 {
    3841  // Search all memory blocks to find ptr.
    3842  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3843  {
    3844  ItemBlock& block = m_ItemBlocks[i];
    3845 
    3846  // Casting to union.
    3847  Item* pItemPtr;
    3848  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3849 
    3850  // Check if pItemPtr is in address range of this block.
    3851  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3852  {
    3853  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3854  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3855  block.FirstFreeIndex = index;
    3856  return;
    3857  }
    3858  }
    3859  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3860 }
    3861 
    3862 template<typename T>
    3863 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3864 {
    3865  ItemBlock newBlock = {
    3866  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3867 
    3868  m_ItemBlocks.push_back(newBlock);
    3869 
    3870  // Setup singly-linked list of all free items in this block.
    3871  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3872  newBlock.pItems[i].NextFreeIndex = i + 1;
    3873  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3874  return m_ItemBlocks.back();
    3875 }
    3876 
    3878 // class VmaRawList, VmaList
    3879 
    3880 #if VMA_USE_STL_LIST
    3881 
    3882 #define VmaList std::list
    3883 
    3884 #else // #if VMA_USE_STL_LIST
    3885 
    3886 template<typename T>
    3887 struct VmaListItem
    3888 {
    3889  VmaListItem* pPrev;
    3890  VmaListItem* pNext;
    3891  T Value;
    3892 };
    3893 
    3894 // Doubly linked list.
    3895 template<typename T>
    3896 class VmaRawList
    3897 {
    3898  VMA_CLASS_NO_COPY(VmaRawList)
    3899 public:
    3900  typedef VmaListItem<T> ItemType;
    3901 
    3902  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    3903  ~VmaRawList();
    3904  void Clear();
    3905 
    3906  size_t GetCount() const { return m_Count; }
    3907  bool IsEmpty() const { return m_Count == 0; }
    3908 
    3909  ItemType* Front() { return m_pFront; }
    3910  const ItemType* Front() const { return m_pFront; }
    3911  ItemType* Back() { return m_pBack; }
    3912  const ItemType* Back() const { return m_pBack; }
    3913 
    3914  ItemType* PushBack();
    3915  ItemType* PushFront();
    3916  ItemType* PushBack(const T& value);
    3917  ItemType* PushFront(const T& value);
    3918  void PopBack();
    3919  void PopFront();
    3920 
    3921  // Item can be null - it means PushBack.
    3922  ItemType* InsertBefore(ItemType* pItem);
    3923  // Item can be null - it means PushFront.
    3924  ItemType* InsertAfter(ItemType* pItem);
    3925 
    3926  ItemType* InsertBefore(ItemType* pItem, const T& value);
    3927  ItemType* InsertAfter(ItemType* pItem, const T& value);
    3928 
    3929  void Remove(ItemType* pItem);
    3930 
    3931 private:
    3932  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    3933  VmaPoolAllocator<ItemType> m_ItemAllocator;
    3934  ItemType* m_pFront;
    3935  ItemType* m_pBack;
    3936  size_t m_Count;
    3937 };
    3938 
    3939 template<typename T>
    3940 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    3941  m_pAllocationCallbacks(pAllocationCallbacks),
    3942  m_ItemAllocator(pAllocationCallbacks, 128),
    3943  m_pFront(VMA_NULL),
    3944  m_pBack(VMA_NULL),
    3945  m_Count(0)
    3946 {
    3947 }
    3948 
    3949 template<typename T>
    3950 VmaRawList<T>::~VmaRawList()
    3951 {
    3952  // Intentionally not calling Clear, because that would be unnecessary
    3953  // computations to return all items to m_ItemAllocator as free.
    3954 }
    3955 
    3956 template<typename T>
    3957 void VmaRawList<T>::Clear()
    3958 {
    3959  if(IsEmpty() == false)
    3960  {
    3961  ItemType* pItem = m_pBack;
    3962  while(pItem != VMA_NULL)
    3963  {
    3964  ItemType* const pPrevItem = pItem->pPrev;
    3965  m_ItemAllocator.Free(pItem);
    3966  pItem = pPrevItem;
    3967  }
    3968  m_pFront = VMA_NULL;
    3969  m_pBack = VMA_NULL;
    3970  m_Count = 0;
    3971  }
    3972 }
    3973 
    3974 template<typename T>
    3975 VmaListItem<T>* VmaRawList<T>::PushBack()
    3976 {
    3977  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3978  pNewItem->pNext = VMA_NULL;
    3979  if(IsEmpty())
    3980  {
    3981  pNewItem->pPrev = VMA_NULL;
    3982  m_pFront = pNewItem;
    3983  m_pBack = pNewItem;
    3984  m_Count = 1;
    3985  }
    3986  else
    3987  {
    3988  pNewItem->pPrev = m_pBack;
    3989  m_pBack->pNext = pNewItem;
    3990  m_pBack = pNewItem;
    3991  ++m_Count;
    3992  }
    3993  return pNewItem;
    3994 }
    3995 
    3996 template<typename T>
    3997 VmaListItem<T>* VmaRawList<T>::PushFront()
    3998 {
    3999  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4000  pNewItem->pPrev = VMA_NULL;
    4001  if(IsEmpty())
    4002  {
    4003  pNewItem->pNext = VMA_NULL;
    4004  m_pFront = pNewItem;
    4005  m_pBack = pNewItem;
    4006  m_Count = 1;
    4007  }
    4008  else
    4009  {
    4010  pNewItem->pNext = m_pFront;
    4011  m_pFront->pPrev = pNewItem;
    4012  m_pFront = pNewItem;
    4013  ++m_Count;
    4014  }
    4015  return pNewItem;
    4016 }
    4017 
    4018 template<typename T>
    4019 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4020 {
    4021  ItemType* const pNewItem = PushBack();
    4022  pNewItem->Value = value;
    4023  return pNewItem;
    4024 }
    4025 
    4026 template<typename T>
    4027 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4028 {
    4029  ItemType* const pNewItem = PushFront();
    4030  pNewItem->Value = value;
    4031  return pNewItem;
    4032 }
    4033 
    4034 template<typename T>
    4035 void VmaRawList<T>::PopBack()
    4036 {
    4037  VMA_HEAVY_ASSERT(m_Count > 0);
    4038  ItemType* const pBackItem = m_pBack;
    4039  ItemType* const pPrevItem = pBackItem->pPrev;
    4040  if(pPrevItem != VMA_NULL)
    4041  {
    4042  pPrevItem->pNext = VMA_NULL;
    4043  }
    4044  m_pBack = pPrevItem;
    4045  m_ItemAllocator.Free(pBackItem);
    4046  --m_Count;
    4047 }
    4048 
    4049 template<typename T>
    4050 void VmaRawList<T>::PopFront()
    4051 {
    4052  VMA_HEAVY_ASSERT(m_Count > 0);
    4053  ItemType* const pFrontItem = m_pFront;
    4054  ItemType* const pNextItem = pFrontItem->pNext;
    4055  if(pNextItem != VMA_NULL)
    4056  {
    4057  pNextItem->pPrev = VMA_NULL;
    4058  }
    4059  m_pFront = pNextItem;
    4060  m_ItemAllocator.Free(pFrontItem);
    4061  --m_Count;
    4062 }
    4063 
    4064 template<typename T>
    4065 void VmaRawList<T>::Remove(ItemType* pItem)
    4066 {
    4067  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4068  VMA_HEAVY_ASSERT(m_Count > 0);
    4069 
    4070  if(pItem->pPrev != VMA_NULL)
    4071  {
    4072  pItem->pPrev->pNext = pItem->pNext;
    4073  }
    4074  else
    4075  {
    4076  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4077  m_pFront = pItem->pNext;
    4078  }
    4079 
    4080  if(pItem->pNext != VMA_NULL)
    4081  {
    4082  pItem->pNext->pPrev = pItem->pPrev;
    4083  }
    4084  else
    4085  {
    4086  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4087  m_pBack = pItem->pPrev;
    4088  }
    4089 
    4090  m_ItemAllocator.Free(pItem);
    4091  --m_Count;
    4092 }
    4093 
    4094 template<typename T>
    4095 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4096 {
    4097  if(pItem != VMA_NULL)
    4098  {
    4099  ItemType* const prevItem = pItem->pPrev;
    4100  ItemType* const newItem = m_ItemAllocator.Alloc();
    4101  newItem->pPrev = prevItem;
    4102  newItem->pNext = pItem;
    4103  pItem->pPrev = newItem;
    4104  if(prevItem != VMA_NULL)
    4105  {
    4106  prevItem->pNext = newItem;
    4107  }
    4108  else
    4109  {
    4110  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4111  m_pFront = newItem;
    4112  }
    4113  ++m_Count;
    4114  return newItem;
    4115  }
    4116  else
    4117  return PushBack();
    4118 }
    4119 
    4120 template<typename T>
    4121 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4122 {
    4123  if(pItem != VMA_NULL)
    4124  {
    4125  ItemType* const nextItem = pItem->pNext;
    4126  ItemType* const newItem = m_ItemAllocator.Alloc();
    4127  newItem->pNext = nextItem;
    4128  newItem->pPrev = pItem;
    4129  pItem->pNext = newItem;
    4130  if(nextItem != VMA_NULL)
    4131  {
    4132  nextItem->pPrev = newItem;
    4133  }
    4134  else
    4135  {
    4136  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4137  m_pBack = newItem;
    4138  }
    4139  ++m_Count;
    4140  return newItem;
    4141  }
    4142  else
    4143  return PushFront();
    4144 }
    4145 
    4146 template<typename T>
    4147 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4148 {
    4149  ItemType* const newItem = InsertBefore(pItem);
    4150  newItem->Value = value;
    4151  return newItem;
    4152 }
    4153 
    4154 template<typename T>
    4155 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4156 {
    4157  ItemType* const newItem = InsertAfter(pItem);
    4158  newItem->Value = value;
    4159  return newItem;
    4160 }
    4161 
/*
STL-compatible wrapper around VmaRawList that adds bidirectional iterators and
a subset of the std::list interface (begin/end, push_back, insert, erase,
clear, empty, size). AllocatorT must expose an m_pCallbacks member
(the VkAllocationCallbacks* forwarded to VmaRawList).
*/
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable bidirectional iterator. end() is represented by m_pItem == VMA_NULL.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        iterator& operator--()
        {
            // Decrementing end() yields the last element, like std::list.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const iterator& rhs) const
        {
            // Comparing iterators from different lists is invalid.
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        // Only VmaList itself may construct a positioned iterator.
        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only bidirectional iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        const_iterator& operator--()
        {
            // Decrementing end() yields the last element, like std::list.
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        // Only VmaList itself may construct a positioned const_iterator.
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    // Removes the element at it; other iterators stay valid (list semantics).
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before it and returns an iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
    4346 
    4347 #endif // #if VMA_USE_STL_LIST
    4348 
    4350 // class VmaMap
    4351 
    4352 // Unused in this version.
    4353 #if 0
    4354 
    4355 #if VMA_USE_STL_UNORDERED_MAP
    4356 
    4357 #define VmaPair std::pair
    4358 
    4359 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4360  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4361 
    4362 #else // #if VMA_USE_STL_UNORDERED_MAP
    4363 
// Minimal substitute for std::pair, used by VmaMap when
// VMA_USE_STL_UNORDERED_MAP is disabled.
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    // Default constructor value-initializes both members.
    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4373 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Implemented as a vector of pairs kept sorted by key (see insert()), so
lookup is a binary search rather than hashing.
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    // Iterators are raw pointers into the underlying vector; any insert or
    // erase invalidates them.
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    // Inserts pair at the position that keeps the vector sorted by key.
    void insert(const PairType& pair);
    // Returns iterator to the element with the given key, or end() if absent.
    iterator find(const KeyT& key);
    // Removes the element pointed to by it.
    void erase(iterator it);

private:
    // Storage: pairs kept sorted ascending by .first (the key).
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4396 
    4397 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4398 
    4399 template<typename FirstT, typename SecondT>
    4400 struct VmaPairFirstLess
    4401 {
    4402  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4403  {
    4404  return lhs.first < rhs.first;
    4405  }
    4406  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4407  {
    4408  return lhs.first < rhsFirst;
    4409  }
    4410 };
    4411 
    4412 template<typename KeyT, typename ValueT>
    4413 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4414 {
    4415  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4416  m_Vector.data(),
    4417  m_Vector.data() + m_Vector.size(),
    4418  pair,
    4419  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4420  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4421 }
    4422 
    4423 template<typename KeyT, typename ValueT>
    4424 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4425 {
    4426  PairType* it = VmaBinaryFindFirstNotLess(
    4427  m_Vector.data(),
    4428  m_Vector.data() + m_Vector.size(),
    4429  key,
    4430  VmaPairFirstLess<KeyT, ValueT>());
    4431  if((it != m_Vector.end()) && (it->first == key))
    4432  {
    4433  return it;
    4434  }
    4435  else
    4436  {
    4437  return m_Vector.end();
    4438  }
    4439 }
    4440 
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    // it is a raw pointer into m_Vector, so its distance from begin()
    // is the index of the element to remove.
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4446 
    4447 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4448 
    4449 #endif // #if 0
    4450 
    4452 
class VmaDeviceMemoryBlock;

// Selects whether a CPU cache maintenance operation on mapped memory
// flushes writes to the device or invalidates the CPU's view.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4456 
/*
Represents a single memory allocation made by the library.

An allocation is in exactly one of three states (m_Type):
- ALLOCATION_TYPE_NONE: freshly constructed, not yet initialized.
- ALLOCATION_TYPE_BLOCK: a sub-range of a bigger VmaDeviceMemoryBlock;
  payload in m_BlockAllocation.
- ALLOCATION_TYPE_DEDICATED: owns its own VkDeviceMemory;
  payload in m_DedicatedAllocation.
The two payloads share storage through the anonymous union below.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // Top bit of m_MapCount marks a persistently mapped allocation.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // Set when m_pUserData points to a string owned by this allocation
        // (freed via FreeUserDataString) rather than an opaque user pointer.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Only the persistent-map bit may remain; manual map refcount must be 0.
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Switches the allocation from NONE to BLOCK state, binding it to a
    // region [offset, offset+size) of the given block.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Initializes the allocation directly into the "lost" state: BLOCK type
    // with a null block. m_LastUseFrameIndex must already be VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Rebinds a BLOCK allocation to another block/offset
    // (presumably used by defragmentation - defined out of line).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    // Switches the allocation from NONE to DEDICATED state.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation
    // as if it were a one-allocation block with no unused space.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    // Map/unmap entry points, split by allocation type (defined out of line).
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4675 
    4676 /*
    4677 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    4678 allocated memory block or free.
    4679 */
struct VmaSuballocation
{
    VkDeviceSize offset; // Offset of this region from the start of the memory block.
    VkDeviceSize size;   // Size of the region in bytes.
    // Owning allocation; presumably null/invalid when the region is free - TODO confirm.
    VmaAllocation hAllocation;
    VmaSuballocationType type;
};
    4687 
    4688 // Comparator for offsets.
    4689 struct VmaSuballocationOffsetLess
    4690 {
    4691  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4692  {
    4693  return lhs.offset < rhs.offset;
    4694  }
    4695 };
    4696 struct VmaSuballocationOffsetGreater
    4697 {
    4698  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    4699  {
    4700  return lhs.offset > rhs.offset;
    4701  }
    4702 };
    4703 
// Sequence of suballocations within one VkDeviceMemory block.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4708 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData;

    // Heuristic cost of fulfilling this request: bytes of existing
    // allocations that would be lost, plus a fixed per-allocation penalty
    // (VMA_LOST_ALLOCATION_COST). Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4736 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete strategies implemented elsewhere in this file
include VmaBlockMetadata_Generic and VmaBlockMetadata_Linear.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Must be called once after construction with the block's total size.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation: resizing not supported.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Helpers for derived classes to emit a uniform JSON layout.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4825 
// Used inside Validate() implementations: if cond is false, asserts with the
// failed condition text and makes the enclosing function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4830 
/*
Default block metadata implementation. Keeps all suballocations in
m_Suballocations, plus an auxiliary vector of iterators to free
suballocations sorted by size for fast best-fit searches.
*/
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Used allocations = all suballocations minus the free ones.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    // Number of suballocations that are currently free.
    uint32_t m_FreeCount;
    // Sum of sizes of all free suballocations.
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4923 
    4924 /*
    4925 Allocations and their references in internal data structure look like this:
    4926 
    4927 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4928 
    4929  0 +-------+
    4930  | |
    4931  | |
    4932  | |
    4933  +-------+
    4934  | Alloc | 1st[m_1stNullItemsBeginCount]
    4935  +-------+
    4936  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4937  +-------+
    4938  | ... |
    4939  +-------+
    4940  | Alloc | 1st[1st.size() - 1]
    4941  +-------+
    4942  | |
    4943  | |
    4944  | |
    4945 GetSize() +-------+
    4946 
    4947 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4948 
    4949  0 +-------+
    4950  | Alloc | 2nd[0]
    4951  +-------+
    4952  | Alloc | 2nd[1]
    4953  +-------+
    4954  | ... |
    4955  +-------+
    4956  | Alloc | 2nd[2nd.size() - 1]
    4957  +-------+
    4958  | |
    4959  | |
    4960  | |
    4961  +-------+
    4962  | Alloc | 1st[m_1stNullItemsBeginCount]
    4963  +-------+
    4964  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4965  +-------+
    4966  | ... |
    4967  +-------+
    4968  | Alloc | 1st[1st.size() - 1]
    4969  +-------+
    4970  | |
    4971 GetSize() +-------+
    4972 
    4973 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4974 
    4975  0 +-------+
    4976  | |
    4977  | |
    4978  | |
    4979  +-------+
    4980  | Alloc | 1st[m_1stNullItemsBeginCount]
    4981  +-------+
    4982  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4983  +-------+
    4984  | ... |
    4985  +-------+
    4986  | Alloc | 1st[1st.size() - 1]
    4987  +-------+
    4988  | |
    4989  | |
    4990  | |
    4991  +-------+
    4992  | Alloc | 2nd[2nd.size() - 1]
    4993  +-------+
    4994  | ... |
    4995  +-------+
    4996  | Alloc | 2nd[1]
    4997  +-------+
    4998  | Alloc | 2nd[0]
    4999 GetSize() +-------+
    5000 
    5001 */
/*
Metadata for a single memory block managed as a linear allocator:
allocations are appended at increasing offsets (1st vector), and depending on
the usage pattern the block can additionally operate as a ring buffer or a
double stack (2nd vector) - see the layout diagrams in the comment above.
Thread-safety: externally synchronized, like all VmaBlockMetadata subclasses.
*/
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    // Must be called once after construction, before any other method.
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    // Tries to find a place for an allocation of given size/alignment.
    // On success returns true and fills *pAllocationRequest.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    // Commits a request previously returned by CreateAllocationRequest.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in a ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    // Total number of free bytes, kept up to date incrementally.
    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    // Selects which of the two vectors currently plays the role of "1st".
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    // Heuristic: whether the 1st vector has accumulated enough null items to be compacted.
    bool ShouldCompact1st() const;
    // Removes null items / swaps vectors after a Free, restoring invariants.
    void CleanupAfterFree();
};
    5100 
    5101 /*
    5102 - GetSize() is the original size of allocated memory block.
    5103 - m_UsableSize is this size aligned down to a power of two.
    5104  All allocations and calculations happen relative to m_UsableSize.
    5105 - GetUnusableSize() is the difference between them.
 It is reported as a separate, unused range, not available for allocations.
    5107 
    5108 Node at level 0 has size = m_UsableSize.
    5109 Each next level contains nodes with size 2 times smaller than current level.
    5110 m_LevelCount is the maximum number of levels to use in the current object.
    5111 */
/*
Metadata for a single memory block managed with the buddy algorithm:
a binary tree of nodes whose sizes halve at each level, rooted at m_UsableSize.
Free nodes of each level are linked into a per-level free list for O(1) lookup.
Thread-safety: externally synchronized, like all VmaBlockMetadata subclasses.
*/
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    // Must be called once after construction, before any other method.
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Unusable tail (size not covered by the power-of-two m_UsableSize) counts as free.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty exactly when the root node has never been split (or fully merged back).
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection (magic values) is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    // Nodes are never split below this size, bounding tree depth.
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    // Upper bound on tree depth; also the size of m_FreeList.
    static const size_t MAX_LEVELS = 30;

    // Accumulators used while recursively validating the tree in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // One node of the buddy tree. Payload depends on `type` (see union).
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        // The sibling node covering the adjacent half of the parent's range.
        Node* buddy;

        union
        {
            // Valid when type == TYPE_FREE: intrusive links in the per-level free list.
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // Valid when type == TYPE_ALLOCATION.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // Valid when type == TYPE_SPLIT; right child is leftChild->buddy.
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level doubly linked list of free nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    // Maps a requested allocation size to the deepest level whose node size fits it.
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5248 
    5249 /*
    5250 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5251 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5252 
    5253 Thread-safety: This class must be externally synchronized.
    5254 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Suballocation bookkeeping for this block; concrete subclass depends on `algorithm`.
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        // Destroy() must have been called first to release the VkDeviceMemory.
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    // Returns null when the block is not currently mapped.
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // ppData can be null. `count` is the number of references added to the
    // internal map reference counter (unmapped only when it drops back to 0).
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Write/verify debug magic values around an allocation, for corruption detection.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    // Reference count of outstanding Map() calls.
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5317 
    5318 struct VmaPointerLess
    5319 {
    5320  bool operator()(const void* lhs, const void* rhs) const
    5321  {
    5322  return lhs < rhs;
    5323  }
    5324 };
    5325 
    5326 class VmaDefragmentator;
    5327 
    5328 /*
    5329 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5330 Vulkan memory type.
    5331 
    5332 Synchronized internally with a mutex.
    5333 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount empty blocks.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates from an existing block or creates a new one, as permitted by
    // createInfo flags and the min/max block count limits.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates m_pDefragmentator on first use.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one block that is completely empty - a hysteresis
    to avoid the pessimistic case of alternating creation and destruction of a
    VkDeviceMemory. m_HasEmptyBlock tracks whether such a block exists. */
    bool m_HasEmptyBlock;
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    // Lazily created by EnsureDefragmentator(), destroyed by DestroyDefragmentator().
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5446 
// Implementation of the opaque VmaPool handle: a custom pool is essentially a
// VmaBlockVector with its own parameters, plus an id used for recording/replay.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id is assigned exactly once, after creation (0 means "not yet assigned").
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5469 
// Moves allocations between blocks of a single VmaBlockVector to reduce
// fragmentation. Usage: AddAllocation() for each candidate, then Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    const VmaBlockVector* is not owned; the defragmentator operates on it.
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Totals accumulated across DefragmentRound() calls.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        // Optional out-flag set to VK_TRUE if this allocation was moved.
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block bookkeeping built during Defragment().
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            // Conservatively assume non-movable until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when it contains allocations
        // that were not registered for defragmentation via AddAllocation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE(review): method name contains a typo ("Descecnding"); kept as-is
        // because renaming would break existing callers.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        // Maps the block's memory if not already mapped; see m_pMappedDataForDefragmentation.
        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Allows binary search of BlockInfo* by the block pointer it wraps.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // One pass of moving allocations, bounded by the given budgets.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as movable. pChanged (optional) receives VK_TRUE if it moves.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5599 
    5600 #if VMA_RECORDING_ENABLED
    5601 
// Records every VMA API call into a CSV-like file (m_File) for later offline
// replay. Only compiled in when VMA_RECORDING_ENABLED is nonzero.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a header describing the device/driver so a replay can validate compatibility.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per public VMA entry point; each appends a line to the file.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Common per-call fields written at the start of every recorded line.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats an allocation's pUserData for output: either the user string
    // itself or the pointer rendered as hex text into m_PtrStr.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        // 16 hex digits + terminating null, enough for a 64-bit pointer.
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    // Serializes writes to m_File when m_UseMutex is set.
    VMA_MUTEX m_FileMutex;
    // High-resolution timer frequency and start value used to compute CallParams::time.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5701 
    5702 #endif // #if VMA_RECORDING_ENABLED
    5703 
    5704 // Main allocator object.
    5705 struct VmaAllocator_T
    5706 {
    5707  VMA_CLASS_NO_COPY(VmaAllocator_T)
    5708 public:
    5709  bool m_UseMutex;
    5710  bool m_UseKhrDedicatedAllocation;
    5711  VkDevice m_hDevice;
    5712  bool m_AllocationCallbacksSpecified;
    5713  VkAllocationCallbacks m_AllocationCallbacks;
    5714  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    5715 
    5716  // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    5717  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    5718  VMA_MUTEX m_HeapSizeLimitMutex;
    5719 
    5720  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    5721  VkPhysicalDeviceMemoryProperties m_MemProps;
    5722 
    5723  // Default pools.
    5724  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    5725 
    5726  // Each vector is sorted by memory (handle value).
    5727  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    5728  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    5729  VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    5730 
    5731  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    5732  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    5733  ~VmaAllocator_T();
    5734 
    5735  const VkAllocationCallbacks* GetAllocationCallbacks() const
    5736  {
    5737  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    5738  }
    5739  const VmaVulkanFunctions& GetVulkanFunctions() const
    5740  {
    5741  return m_VulkanFunctions;
    5742  }
    5743 
    5744  VkDeviceSize GetBufferImageGranularity() const
    5745  {
    5746  return VMA_MAX(
    5747  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    5748  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    5749  }
    5750 
    5751  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    5752  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    5753 
    5754  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    5755  {
    5756  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    5757  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    5758  }
    5759  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    5760  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    5761  {
    5762  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    5763  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    5764  }
    5765  // Minimum alignment for all allocations in specific memory type.
    5766  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    5767  {
    5768  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    5769  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    5770  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    5771  }
    5772 
    5773  bool IsIntegratedGpu() const
    5774  {
    5775  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    5776  }
    5777 
    5778 #if VMA_RECORDING_ENABLED
    5779  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    5780 #endif
    5781 
    5782  void GetBufferMemoryRequirements(
    5783  VkBuffer hBuffer,
    5784  VkMemoryRequirements& memReq,
    5785  bool& requiresDedicatedAllocation,
    5786  bool& prefersDedicatedAllocation) const;
    5787  void GetImageMemoryRequirements(
    5788  VkImage hImage,
    5789  VkMemoryRequirements& memReq,
    5790  bool& requiresDedicatedAllocation,
    5791  bool& prefersDedicatedAllocation) const;
    5792 
    5793  // Main allocation function.
    5794  VkResult AllocateMemory(
    5795  const VkMemoryRequirements& vkMemReq,
    5796  bool requiresDedicatedAllocation,
    5797  bool prefersDedicatedAllocation,
    5798  VkBuffer dedicatedBuffer,
    5799  VkImage dedicatedImage,
    5800  const VmaAllocationCreateInfo& createInfo,
    5801  VmaSuballocationType suballocType,
    5802  VmaAllocation* pAllocation);
    5803 
    5804  // Main deallocation function.
    5805  void FreeMemory(const VmaAllocation allocation);
    5806 
    5807  VkResult ResizeAllocation(
    5808  const VmaAllocation alloc,
    5809  VkDeviceSize newSize);
    5810 
    5811  void CalculateStats(VmaStats* pStats);
    5812 
    5813 #if VMA_STATS_STRING_ENABLED
    5814  void PrintDetailedMap(class VmaJsonWriter& json);
    5815 #endif
    5816 
    5817  VkResult Defragment(
    5818  VmaAllocation* pAllocations,
    5819  size_t allocationCount,
    5820  VkBool32* pAllocationsChanged,
    5821  const VmaDefragmentationInfo* pDefragmentationInfo,
    5822  VmaDefragmentationStats* pDefragmentationStats);
    5823 
    5824  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    5825  bool TouchAllocation(VmaAllocation hAllocation);
    5826 
    5827  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    5828  void DestroyPool(VmaPool pool);
    5829  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    5830 
    5831  void SetCurrentFrameIndex(uint32_t frameIndex);
    5832  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    5833 
    5834  void MakePoolAllocationsLost(
    5835  VmaPool hPool,
    5836  size_t* pLostAllocationCount);
    5837  VkResult CheckPoolCorruption(VmaPool hPool);
    5838  VkResult CheckCorruption(uint32_t memoryTypeBits);
    5839 
    5840  void CreateLostAllocation(VmaAllocation* pAllocation);
    5841 
    5842  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    5843  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    5844 
    5845  VkResult Map(VmaAllocation hAllocation, void** ppData);
    5846  void Unmap(VmaAllocation hAllocation);
    5847 
    5848  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    5849  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    5850 
    5851  void FlushOrInvalidateAllocation(
    5852  VmaAllocation hAllocation,
    5853  VkDeviceSize offset, VkDeviceSize size,
    5854  VMA_CACHE_OPERATION op);
    5855 
    5856  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    5857 
    5858 private:
    5859  VkDeviceSize m_PreferredLargeHeapBlockSize;
    5860 
    5861  VkPhysicalDevice m_PhysicalDevice;
    5862  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    5863 
    5864  VMA_MUTEX m_PoolsMutex;
    5865  // Protected by m_PoolsMutex. Sorted by pointer value.
    5866  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    5867  uint32_t m_NextPoolId;
    5868 
    5869  VmaVulkanFunctions m_VulkanFunctions;
    5870 
    5871 #if VMA_RECORDING_ENABLED
    5872  VmaRecorder* m_pRecorder;
    5873 #endif
    5874 
    5875  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    5876 
    5877  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    5878 
    5879  VkResult AllocateMemoryOfType(
    5880  VkDeviceSize size,
    5881  VkDeviceSize alignment,
    5882  bool dedicatedAllocation,
    5883  VkBuffer dedicatedBuffer,
    5884  VkImage dedicatedImage,
    5885  const VmaAllocationCreateInfo& createInfo,
    5886  uint32_t memTypeIndex,
    5887  VmaSuballocationType suballocType,
    5888  VmaAllocation* pAllocation);
    5889 
    5890  // Allocates and registers new VkDeviceMemory specifically for single allocation.
    5891  VkResult AllocateDedicatedMemory(
    5892  VkDeviceSize size,
    5893  VmaSuballocationType suballocType,
    5894  uint32_t memTypeIndex,
    5895  bool map,
    5896  bool isUserDataString,
    5897  void* pUserData,
    5898  VkBuffer dedicatedBuffer,
    5899  VkImage dedicatedImage,
    5900  VmaAllocation* pAllocation);
    5901 
    5902  // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    5903  void FreeDedicatedMemory(VmaAllocation allocation);
    5904 };
    5905 
    5907 // Memory allocation #2 after VmaAllocator_T definition
    5908 
    5909 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5910 {
    5911  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5912 }
    5913 
    5914 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5915 {
    5916  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5917 }
    5918 
    5919 template<typename T>
    5920 static T* VmaAllocate(VmaAllocator hAllocator)
    5921 {
    5922  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5923 }
    5924 
    5925 template<typename T>
    5926 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5927 {
    5928  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5929 }
    5930 
    5931 template<typename T>
    5932 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5933 {
    5934  if(ptr != VMA_NULL)
    5935  {
    5936  ptr->~T();
    5937  VmaFree(hAllocator, ptr);
    5938  }
    5939 }
    5940 
    5941 template<typename T>
    5942 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5943 {
    5944  if(ptr != VMA_NULL)
    5945  {
    5946  for(size_t i = count; i--; )
    5947  ptr[i].~T();
    5948  VmaFree(hAllocator, ptr);
    5949  }
    5950 }
    5951 
    5953 // VmaStringBuilder
    5954 
    5955 #if VMA_STATS_STRING_ENABLED
    5956 
// Minimal append-only character buffer used to build the statistics JSON string.
// The buffer is NOT null-terminated internally - read it via GetLength()/GetData().
class VmaStringBuilder
{
public:
    // Uses the allocator's callbacks for the internal character vector.
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num); // Appended in decimal (via VmaUint32ToStr).
    void AddNumber(uint64_t num); // Appended in decimal (via VmaUint64ToStr).
    void AddPointer(const void* ptr); // Appended as formatted by VmaPtrToStr.

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5974 
    5975 void VmaStringBuilder::Add(const char* pStr)
    5976 {
    5977  const size_t strLen = strlen(pStr);
    5978  if(strLen > 0)
    5979  {
    5980  const size_t oldCount = m_Data.size();
    5981  m_Data.resize(oldCount + strLen);
    5982  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5983  }
    5984 }
    5985 
    5986 void VmaStringBuilder::AddNumber(uint32_t num)
    5987 {
    5988  char buf[11];
    5989  VmaUint32ToStr(buf, sizeof(buf), num);
    5990  Add(buf);
    5991 }
    5992 
    5993 void VmaStringBuilder::AddNumber(uint64_t num)
    5994 {
    5995  char buf[21];
    5996  VmaUint64ToStr(buf, sizeof(buf), num);
    5997  Add(buf);
    5998 }
    5999 
    6000 void VmaStringBuilder::AddPointer(const void* ptr)
    6001 {
    6002  char buf[21];
    6003  VmaPtrToStr(buf, sizeof(buf), ptr);
    6004  Add(buf);
    6005 }
    6006 
    6007 #endif // #if VMA_STATS_STRING_ENABLED
    6008 
    6010 // VmaJsonWriter
    6011 
    6012 #if VMA_STATS_STRING_ENABLED
    6013 
/*
Helper that emits well-formed JSON into a VmaStringBuilder.
Maintains a stack of currently open collections so that commas, colons and
indentation are inserted automatically. Usage errors (non-string object key,
unbalanced Begin/End calls) are caught with VMA_ASSERT.
*/
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // Opens "{...}". singleLine = true suppresses newlines/indentation inside it.
    void BeginObject(bool singleLine = false);
    void EndObject();

    // Opens "[...]". singleLine = true suppresses newlines/indentation inside it.
    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted, escaped string value (or object key).
    void WriteString(const char* pStr);
    // Begin/Continue/EndString build one string value from multiple pieces.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // String repeated once per nesting level when indenting.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Inside an object this counts keys and values together: an even count
        // means the next item must be a key (a string) - see BeginValue().
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    // Stack of currently open collections, innermost last.
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString() and EndString().
    bool m_InsideString;

    // Emits the separator (", " or ": ") and indentation required before the next value.
    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6062 
// Indentation unit written once per open (non-single-line) collection level.
const char* const VmaJsonWriter::INDENT = " ";

// The writer appends to sb; the callbacks are only used for the internal stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}

// Verifies that every string and collection opened was also closed.
VmaJsonWriter::~VmaJsonWriter()
{
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6077 
    6078 void VmaJsonWriter::BeginObject(bool singleLine)
    6079 {
    6080  VMA_ASSERT(!m_InsideString);
    6081 
    6082  BeginValue(false);
    6083  m_SB.Add('{');
    6084 
    6085  StackItem item;
    6086  item.type = COLLECTION_TYPE_OBJECT;
    6087  item.valueCount = 0;
    6088  item.singleLineMode = singleLine;
    6089  m_Stack.push_back(item);
    6090 }
    6091 
    6092 void VmaJsonWriter::EndObject()
    6093 {
    6094  VMA_ASSERT(!m_InsideString);
    6095 
    6096  WriteIndent(true);
    6097  m_SB.Add('}');
    6098 
    6099  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6100  m_Stack.pop_back();
    6101 }
    6102 
    6103 void VmaJsonWriter::BeginArray(bool singleLine)
    6104 {
    6105  VMA_ASSERT(!m_InsideString);
    6106 
    6107  BeginValue(false);
    6108  m_SB.Add('[');
    6109 
    6110  StackItem item;
    6111  item.type = COLLECTION_TYPE_ARRAY;
    6112  item.valueCount = 0;
    6113  item.singleLineMode = singleLine;
    6114  m_Stack.push_back(item);
    6115 }
    6116 
    6117 void VmaJsonWriter::EndArray()
    6118 {
    6119  VMA_ASSERT(!m_InsideString);
    6120 
    6121  WriteIndent(true);
    6122  m_SB.Add(']');
    6123 
    6124  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6125  m_Stack.pop_back();
    6126 }
    6127 
    6128 void VmaJsonWriter::WriteString(const char* pStr)
    6129 {
    6130  BeginString(pStr);
    6131  EndString();
    6132 }
    6133 
    6134 void VmaJsonWriter::BeginString(const char* pStr)
    6135 {
    6136  VMA_ASSERT(!m_InsideString);
    6137 
    6138  BeginValue(true);
    6139  m_SB.Add('"');
    6140  m_InsideString = true;
    6141  if(pStr != VMA_NULL && pStr[0] != '\0')
    6142  {
    6143  ContinueString(pStr);
    6144  }
    6145 }
    6146 
    6147 void VmaJsonWriter::ContinueString(const char* pStr)
    6148 {
    6149  VMA_ASSERT(m_InsideString);
    6150 
    6151  const size_t strLen = strlen(pStr);
    6152  for(size_t i = 0; i < strLen; ++i)
    6153  {
    6154  char ch = pStr[i];
    6155  if(ch == '\\')
    6156  {
    6157  m_SB.Add("\\\\");
    6158  }
    6159  else if(ch == '"')
    6160  {
    6161  m_SB.Add("\\\"");
    6162  }
    6163  else if(ch >= 32)
    6164  {
    6165  m_SB.Add(ch);
    6166  }
    6167  else switch(ch)
    6168  {
    6169  case '\b':
    6170  m_SB.Add("\\b");
    6171  break;
    6172  case '\f':
    6173  m_SB.Add("\\f");
    6174  break;
    6175  case '\n':
    6176  m_SB.Add("\\n");
    6177  break;
    6178  case '\r':
    6179  m_SB.Add("\\r");
    6180  break;
    6181  case '\t':
    6182  m_SB.Add("\\t");
    6183  break;
    6184  default:
    6185  VMA_ASSERT(0 && "Character not currently supported.");
    6186  break;
    6187  }
    6188  }
    6189 }
    6190 
    6191 void VmaJsonWriter::ContinueString(uint32_t n)
    6192 {
    6193  VMA_ASSERT(m_InsideString);
    6194  m_SB.AddNumber(n);
    6195 }
    6196 
    6197 void VmaJsonWriter::ContinueString(uint64_t n)
    6198 {
    6199  VMA_ASSERT(m_InsideString);
    6200  m_SB.AddNumber(n);
    6201 }
    6202 
    6203 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    6204 {
    6205  VMA_ASSERT(m_InsideString);
    6206  m_SB.AddPointer(ptr);
    6207 }
    6208 
    6209 void VmaJsonWriter::EndString(const char* pStr)
    6210 {
    6211  VMA_ASSERT(m_InsideString);
    6212  if(pStr != VMA_NULL && pStr[0] != '\0')
    6213  {
    6214  ContinueString(pStr);
    6215  }
    6216  m_SB.Add('"');
    6217  m_InsideString = false;
    6218 }
    6219 
    6220 void VmaJsonWriter::WriteNumber(uint32_t n)
    6221 {
    6222  VMA_ASSERT(!m_InsideString);
    6223  BeginValue(false);
    6224  m_SB.AddNumber(n);
    6225 }
    6226 
    6227 void VmaJsonWriter::WriteNumber(uint64_t n)
    6228 {
    6229  VMA_ASSERT(!m_InsideString);
    6230  BeginValue(false);
    6231  m_SB.AddNumber(n);
    6232 }
    6233 
    6234 void VmaJsonWriter::WriteBool(bool b)
    6235 {
    6236  VMA_ASSERT(!m_InsideString);
    6237  BeginValue(false);
    6238  m_SB.Add(b ? "true" : "false");
    6239 }
    6240 
    6241 void VmaJsonWriter::WriteNull()
    6242 {
    6243  VMA_ASSERT(!m_InsideString);
    6244  BeginValue(false);
    6245  m_SB.Add("null");
    6246 }
    6247 
    6248 void VmaJsonWriter::BeginValue(bool isString)
    6249 {
    6250  if(!m_Stack.empty())
    6251  {
    6252  StackItem& currItem = m_Stack.back();
    6253  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6254  currItem.valueCount % 2 == 0)
    6255  {
    6256  VMA_ASSERT(isString);
    6257  }
    6258 
    6259  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6260  currItem.valueCount % 2 != 0)
    6261  {
    6262  m_SB.Add(": ");
    6263  }
    6264  else if(currItem.valueCount > 0)
    6265  {
    6266  m_SB.Add(", ");
    6267  WriteIndent();
    6268  }
    6269  else
    6270  {
    6271  WriteIndent();
    6272  }
    6273  ++currItem.valueCount;
    6274  }
    6275 }
    6276 
    6277 void VmaJsonWriter::WriteIndent(bool oneLess)
    6278 {
    6279  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6280  {
    6281  m_SB.AddNewLine();
    6282 
    6283  size_t count = m_Stack.size();
    6284  if(count > 0 && oneLess)
    6285  {
    6286  --count;
    6287  }
    6288  for(size_t i = 0; i < count; ++i)
    6289  {
    6290  m_SB.Add(INDENT);
    6291  }
    6292  }
    6293 }
    6294 
    6295 #endif // #if VMA_STATS_STRING_ENABLED
    6296 
    6298 
// Sets or replaces this allocation's user data.
// When IsUserDataString() is true, pUserData is treated as a null-terminated
// string and a private heap copy is stored (the previous copy is freed);
// otherwise the raw pointer is stored as-is without taking ownership.
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Passing the currently stored string pointer would read freed memory below.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            // Duplicate the incoming string, including the terminating null.
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            memcpy(newStrDst, newStrSrc, newStrLen + 1);
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
    6321 
// Rebinds this block allocation to a (possibly different) memory block at the
// given offset. If this allocation currently holds map references, they are
// transferred: the old block is unmapped and the new one mapped the same
// number of times, keeping per-block map reference counts balanced.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount; // A persistent mapping counts as one extra reference.
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}

// Updates this allocation's cached size. The owning block's metadata must be
// kept consistent by the caller.
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    6349 
    6350 VkDeviceSize VmaAllocation_T::GetOffset() const
    6351 {
    6352  switch(m_Type)
    6353  {
    6354  case ALLOCATION_TYPE_BLOCK:
    6355  return m_BlockAllocation.m_Offset;
    6356  case ALLOCATION_TYPE_DEDICATED:
    6357  return 0;
    6358  default:
    6359  VMA_ASSERT(0);
    6360  return 0;
    6361  }
    6362 }
    6363 
    6364 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6365 {
    6366  switch(m_Type)
    6367  {
    6368  case ALLOCATION_TYPE_BLOCK:
    6369  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6370  case ALLOCATION_TYPE_DEDICATED:
    6371  return m_DedicatedAllocation.m_hMemory;
    6372  default:
    6373  VMA_ASSERT(0);
    6374  return VK_NULL_HANDLE;
    6375  }
    6376 }
    6377 
    6378 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6379 {
    6380  switch(m_Type)
    6381  {
    6382  case ALLOCATION_TYPE_BLOCK:
    6383  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6384  case ALLOCATION_TYPE_DEDICATED:
    6385  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6386  default:
    6387  VMA_ASSERT(0);
    6388  return UINT32_MAX;
    6389  }
    6390 }
    6391 
    6392 void* VmaAllocation_T::GetMappedData() const
    6393 {
    6394  switch(m_Type)
    6395  {
    6396  case ALLOCATION_TYPE_BLOCK:
    6397  if(m_MapCount != 0)
    6398  {
    6399  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    6400  VMA_ASSERT(pBlockData != VMA_NULL);
    6401  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    6402  }
    6403  else
    6404  {
    6405  return VMA_NULL;
    6406  }
    6407  break;
    6408  case ALLOCATION_TYPE_DEDICATED:
    6409  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    6410  return m_DedicatedAllocation.m_pMappedData;
    6411  default:
    6412  VMA_ASSERT(0);
    6413  return VMA_NULL;
    6414  }
    6415 }
    6416 
    6417 bool VmaAllocation_T::CanBecomeLost() const
    6418 {
    6419  switch(m_Type)
    6420  {
    6421  case ALLOCATION_TYPE_BLOCK:
    6422  return m_BlockAllocation.m_CanBecomeLost;
    6423  case ALLOCATION_TYPE_DEDICATED:
    6424  return false;
    6425  default:
    6426  VMA_ASSERT(0);
    6427  return false;
    6428  }
    6429 }
    6430 
    6431 VmaPool VmaAllocation_T::GetPool() const
    6432 {
    6433  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    6434  return m_BlockAllocation.m_hPool;
    6435 }
    6436 
// Tries to mark this allocation as lost. Succeeds (returns true) only when the
// allocation has not been used within the last frameInUseCount frames relative
// to currentFrameIndex. Implemented as a compare-exchange loop on the atomic
// last-use frame index so it is safe against concurrent TouchAllocation calls.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - should not happen given the CanBecomeLost() check above.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Used too recently - may still be in use by the GPU; cannot be lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // Exchange failed: another thread changed the index concurrently -
            // retry with the refreshed value (presumably updated by the
            // compare-exchange helper - NOTE(review): confirm its contract).
        }
    }
}
    6468 
    6469 #if VMA_STATS_STRING_ENABLED
    6470 
// Correspond to values of enum VmaSuballocationType.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};

// Writes this allocation's parameters as key-value pairs into the currently
// open JSON object. The caller is responsible for BeginObject()/EndObject().
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned string - print its contents.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // User data is an opaque pointer - print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    // Buffer/image usage flags are recorded only when known (nonzero).
    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6516 
    6517 #endif
    6518 
    6519 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6520 {
    6521  VMA_ASSERT(IsUserDataString());
    6522  if(m_pUserData != VMA_NULL)
    6523  {
    6524  char* const oldStr = (char*)m_pUserData;
    6525  const size_t oldStrLen = strlen(oldStr);
    6526  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6527  m_pUserData = VMA_NULL;
    6528  }
    6529 }
    6530 
    6531 void VmaAllocation_T::BlockAllocMap()
    6532 {
    6533  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6534 
    6535  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    6536  {
    6537  ++m_MapCount;
    6538  }
    6539  else
    6540  {
    6541  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    6542  }
    6543 }
    6544 
    6545 void VmaAllocation_T::BlockAllocUnmap()
    6546 {
    6547  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    6548 
    6549  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    6550  {
    6551  --m_MapCount;
    6552  }
    6553  else
    6554  {
    6555  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    6556  }
    6557 }
    6558 
// Maps this dedicated allocation's memory and returns the pointer in *ppData.
// Mapping is reference-counted: the first call performs vkMapMemory over the
// whole range; subsequent calls only bump the counter (up to 0x7F references)
// and return the cached pointer.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped - reuse the cached pointer if the counter has room.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First mapping: map the entire VkDeviceMemory object.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            // Cache the pointer only on success; counter starts at 1.
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}

// Decrements the map reference count and calls vkUnmapMemory when it reaches 0.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference released - drop the cached pointer and unmap.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6616 
    6617 #if VMA_STATS_STRING_ENABLED
    6618 
// Serializes one VmaStatInfo as a JSON object. The Min/Avg/Max sub-objects are
// emitted only when there is more than one element to aggregate.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6666 
    6667 #endif // #if VMA_STATS_STRING_ENABLED
    6668 
// Comparator ordering suballocation-list iterators by ascending suballocation
// size. The second overload allows heterogeneous comparison against a plain
// size value (e.g. for binary search by requested size).
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6684 
    6685 
    6687 // class VmaBlockMetadata
    6688 
// Base metadata constructor: size is set later via Init(); the allocation
// callbacks are stored for use by derived classes' containers.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6694 
    6695 #if VMA_STATS_STRING_ENABLED
    6696 
// Opens the JSON object describing one memory block, writes its summary
// fields, and opens the "Suballocations" array. Must be paired with
// PrintDetailedMap_End().
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}

// Writes one occupied suballocation as an element of the "Suballocations" array.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Type, size, user data etc. are printed by the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}

// Writes one free range as an element of the "Suballocations" array.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}

// Closes the "Suballocations" array and the block's JSON object.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6757 
    6758 #endif // #if VMA_STATS_STRING_ENABLED
    6759 
    6761 // class VmaBlockMetadata_Generic
    6762 
// Generic (free-list based) metadata. Containers use the allocator's callbacks.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}

VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}

// Initializes metadata for an empty block: a single free suballocation
// spanning the whole size, registered in the sorted-by-size lookup vector.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // A whole block is always large enough to be registered by size.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem; // Iterator to the just-added (last) element.
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6795 
// Internal consistency check of all metadata structures: suballocation offsets
// and adjacency, free counters/sums, and the sorted by-size registry.
// Returns true when consistent; each VMA_VALIDATE fails out otherwise.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free suballocations have no allocation handle; occupied ones must have one.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The owning allocation must agree about its placement and size.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6877 
    6878 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6879 {
    6880  if(!m_FreeSuballocationsBySize.empty())
    6881  {
    6882  return m_FreeSuballocationsBySize.back()->size;
    6883  }
    6884  else
    6885  {
    6886  return 0;
    6887  }
    6888 }
    6889 
    6890 bool VmaBlockMetadata_Generic::IsEmpty() const
    6891 {
    6892  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    6893 }
    6894 
    6895 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    6896 {
    6897  outInfo.blockCount = 1;
    6898 
    6899  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6900  outInfo.allocationCount = rangeCount - m_FreeCount;
    6901  outInfo.unusedRangeCount = m_FreeCount;
    6902 
    6903  outInfo.unusedBytes = m_SumFreeSize;
    6904  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    6905 
    6906  outInfo.allocationSizeMin = UINT64_MAX;
    6907  outInfo.allocationSizeMax = 0;
    6908  outInfo.unusedRangeSizeMin = UINT64_MAX;
    6909  outInfo.unusedRangeSizeMax = 0;
    6910 
    6911  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6912  suballocItem != m_Suballocations.cend();
    6913  ++suballocItem)
    6914  {
    6915  const VmaSuballocation& suballoc = *suballocItem;
    6916  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    6917  {
    6918  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    6919  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    6920  }
    6921  else
    6922  {
    6923  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    6924  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    6925  }
    6926  }
    6927 }
    6928 
    6929 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    6930 {
    6931  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    6932 
    6933  inoutStats.size += GetSize();
    6934  inoutStats.unusedSize += m_SumFreeSize;
    6935  inoutStats.allocationCount += rangeCount - m_FreeCount;
    6936  inoutStats.unusedRangeCount += m_FreeCount;
    6937  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    6938 }
    6939 
    6940 #if VMA_STATS_STRING_ENABLED
    6941 
    6942 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    6943 {
    6944  PrintDetailedMap_Begin(json,
    6945  m_SumFreeSize, // unusedBytes
    6946  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    6947  m_FreeCount); // unusedRangeCount
    6948 
    6949  size_t i = 0;
    6950  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    6951  suballocItem != m_Suballocations.cend();
    6952  ++suballocItem, ++i)
    6953  {
    6954  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    6955  {
    6956  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    6957  }
    6958  else
    6959  {
    6960  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    6961  }
    6962  }
    6963 
    6964  PrintDetailedMap_End(json);
    6965 }
    6966 
    6967 #endif // #if VMA_STATS_STRING_ENABLED
    6968 
    6969 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6970  uint32_t currentFrameIndex,
    6971  uint32_t frameInUseCount,
    6972  VkDeviceSize bufferImageGranularity,
    6973  VkDeviceSize allocSize,
    6974  VkDeviceSize allocAlignment,
    6975  bool upperAddress,
    6976  VmaSuballocationType allocType,
    6977  bool canMakeOtherLost,
    6978  uint32_t strategy,
    6979  VmaAllocationRequest* pAllocationRequest)
    6980 {
    6981  VMA_ASSERT(allocSize > 0);
    6982  VMA_ASSERT(!upperAddress);
    6983  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6984  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6985  VMA_HEAVY_ASSERT(Validate());
    6986 
    6987  // There is not enough total free space in this block to fullfill the request: Early return.
    6988  if(canMakeOtherLost == false &&
    6989  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6990  {
    6991  return false;
    6992  }
    6993 
    6994  // New algorithm, efficiently searching freeSuballocationsBySize.
    6995  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6996  if(freeSuballocCount > 0)
    6997  {
    6999  {
    7000  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7001  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7002  m_FreeSuballocationsBySize.data(),
    7003  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7004  allocSize + 2 * VMA_DEBUG_MARGIN,
    7005  VmaSuballocationItemSizeLess());
    7006  size_t index = it - m_FreeSuballocationsBySize.data();
    7007  for(; index < freeSuballocCount; ++index)
    7008  {
    7009  if(CheckAllocation(
    7010  currentFrameIndex,
    7011  frameInUseCount,
    7012  bufferImageGranularity,
    7013  allocSize,
    7014  allocAlignment,
    7015  allocType,
    7016  m_FreeSuballocationsBySize[index],
    7017  false, // canMakeOtherLost
    7018  &pAllocationRequest->offset,
    7019  &pAllocationRequest->itemsToMakeLostCount,
    7020  &pAllocationRequest->sumFreeSize,
    7021  &pAllocationRequest->sumItemSize))
    7022  {
    7023  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7024  return true;
    7025  }
    7026  }
    7027  }
    7028  else // WORST_FIT, FIRST_FIT
    7029  {
    7030  // Search staring from biggest suballocations.
    7031  for(size_t index = freeSuballocCount; index--; )
    7032  {
    7033  if(CheckAllocation(
    7034  currentFrameIndex,
    7035  frameInUseCount,
    7036  bufferImageGranularity,
    7037  allocSize,
    7038  allocAlignment,
    7039  allocType,
    7040  m_FreeSuballocationsBySize[index],
    7041  false, // canMakeOtherLost
    7042  &pAllocationRequest->offset,
    7043  &pAllocationRequest->itemsToMakeLostCount,
    7044  &pAllocationRequest->sumFreeSize,
    7045  &pAllocationRequest->sumItemSize))
    7046  {
    7047  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7048  return true;
    7049  }
    7050  }
    7051  }
    7052  }
    7053 
    7054  if(canMakeOtherLost)
    7055  {
    7056  // Brute-force algorithm. TODO: Come up with something better.
    7057 
    7058  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7059  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7060 
    7061  VmaAllocationRequest tmpAllocRequest = {};
    7062  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7063  suballocIt != m_Suballocations.end();
    7064  ++suballocIt)
    7065  {
    7066  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7067  suballocIt->hAllocation->CanBecomeLost())
    7068  {
    7069  if(CheckAllocation(
    7070  currentFrameIndex,
    7071  frameInUseCount,
    7072  bufferImageGranularity,
    7073  allocSize,
    7074  allocAlignment,
    7075  allocType,
    7076  suballocIt,
    7077  canMakeOtherLost,
    7078  &tmpAllocRequest.offset,
    7079  &tmpAllocRequest.itemsToMakeLostCount,
    7080  &tmpAllocRequest.sumFreeSize,
    7081  &tmpAllocRequest.sumItemSize))
    7082  {
    7083  tmpAllocRequest.item = suballocIt;
    7084 
    7085  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7087  {
    7088  *pAllocationRequest = tmpAllocRequest;
    7089  }
    7090  }
    7091  }
    7092  }
    7093 
    7094  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7095  {
    7096  return true;
    7097  }
    7098  }
    7099 
    7100  return false;
    7101 }
    7102 
// Makes lost the allocations counted in pAllocationRequest->itemsToMakeLostCount,
// as prepared by CreateAllocationRequest with canMakeOtherLost == true, freeing
// their suballocations so the request can be committed.
// Returns false if any of those allocations refuses to become lost (MakeLost
// fails); the request must then be abandoned.
// On success, pAllocationRequest->item points at a free suballocation.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free suballocation to reach the next used one.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge neighboring free suballocations; it returns
            // the resulting free item, which becomes the new cursor of the request.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7134 
// Makes lost every allocation in this block that can become lost and for which
// MakeLost succeeds (presumably depending on currentFrameIndex/frameInUseCount —
// the criteria live inside MakeLost, outside this view).
// Returns the number of allocations made lost.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation may merge this item with free neighbors; continue
            // iterating from the merged free item it returns.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    7152 
    7153 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7154 {
    7155  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7156  it != m_Suballocations.end();
    7157  ++it)
    7158  {
    7159  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7160  {
    7161  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7162  {
    7163  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7164  return VK_ERROR_VALIDATION_FAILED_EXT;
    7165  }
    7166  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7167  {
    7168  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7169  return VK_ERROR_VALIDATION_FAILED_EXT;
    7170  }
    7171  }
    7172  }
    7173 
    7174  return VK_SUCCESS;
    7175 }
    7176 
// Commits the allocation described by `request` (previously produced by
// CreateAllocationRequest) into this block's metadata: the chosen free
// suballocation is turned into a used one of exactly allocSize bytes, and any
// leftover space before/after it becomes new free suballocations.
// upperAddress is not supported by this metadata type and must be false.
void VmaBlockMetadata_Generic::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    VMA_ASSERT(!upperAddress);
    VMA_ASSERT(request.item != m_Suballocations.end());
    VmaSuballocation& suballoc = *request.item;
    // Given suballocation is a free block.
    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    // Given offset is inside this suballocation.
    VMA_ASSERT(request.offset >= suballoc.offset);
    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;

    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    // it to become used.
    UnregisterFreeSuballocation(request.item);

    suballoc.offset = request.offset;
    suballoc.size = allocSize;
    suballoc.type = type;
    suballoc.hAllocation = hAllocation;

    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    if(paddingEnd)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset + allocSize;
        paddingSuballoc.size = paddingEnd;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        VmaSuballocationList::iterator next = request.item;
        ++next;
        const VmaSuballocationList::iterator paddingEndItem =
            m_Suballocations.insert(next, paddingSuballoc);
        RegisterFreeSuballocation(paddingEndItem);
    }

    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    if(paddingBegin)
    {
        VmaSuballocation paddingSuballoc = {};
        paddingSuballoc.offset = request.offset - paddingBegin;
        paddingSuballoc.size = paddingBegin;
        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
        const VmaSuballocationList::iterator paddingBeginItem =
            m_Suballocations.insert(request.item, paddingSuballoc);
        RegisterFreeSuballocation(paddingBeginItem);
    }

    // Update totals.
    // The consumed free suballocation is gone (-1); each inserted padding range
    // adds one free suballocation back. Only allocSize leaves the free total,
    // since the paddings remain free.
    m_FreeCount = m_FreeCount - 1;
    if(paddingBegin > 0)
    {
        ++m_FreeCount;
    }
    if(paddingEnd > 0)
    {
        ++m_FreeCount;
    }
    m_SumFreeSize -= allocSize;
}
    7242 
    7243 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7244 {
    7245  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7246  suballocItem != m_Suballocations.end();
    7247  ++suballocItem)
    7248  {
    7249  VmaSuballocation& suballoc = *suballocItem;
    7250  if(suballoc.hAllocation == allocation)
    7251  {
    7252  FreeSuballocation(suballocItem);
    7253  VMA_HEAVY_ASSERT(Validate());
    7254  return;
    7255  }
    7256  }
    7257  VMA_ASSERT(0 && "Not found!");
    7258 }
    7259 
    7260 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7261 {
    7262  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7263  suballocItem != m_Suballocations.end();
    7264  ++suballocItem)
    7265  {
    7266  VmaSuballocation& suballoc = *suballocItem;
    7267  if(suballoc.offset == offset)
    7268  {
    7269  FreeSuballocation(suballocItem);
    7270  return;
    7271  }
    7272  }
    7273  VMA_ASSERT(0 && "Not found!");
    7274 }
    7275 
// Tries to change the size of the given allocation in place, without moving it.
// Shrinking always succeeds: the reclaimed tail either grows the following free
// suballocation backward or becomes a new free suballocation.
// Growing succeeds only if the immediately following suballocation is free and
// large enough (including VMA_DEBUG_MARGIN); the free neighbor is shrunk or
// consumed entirely. Returns false when growing is impossible; asserts if the
// allocation is not found in this block.
// NOTE: the VmaAllocation object itself is resized by the caller afterwards,
// which is why Validate() cannot be called here.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister first: its size changes, so its position in the
                        // size-sorted registry would become stale.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7402 
    7403 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7404 {
    7405  VkDeviceSize lastSize = 0;
    7406  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7407  {
    7408  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7409 
    7410  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7411  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7412  VMA_VALIDATE(it->size >= lastSize);
    7413  lastSize = it->size;
    7414  }
    7415  return true;
    7416 }
    7417 
// Checks whether an allocation of allocSize / allocAlignment / allocType can be
// placed starting at suballocItem, honoring VMA_DEBUG_MARGIN and
// bufferImageGranularity.
// - canMakeOtherLost == false: suballocItem must be free and the allocation must
//   fit entirely inside it. *pSumFreeSize receives the suballocation's size.
// - canMakeOtherLost == true: following suballocations may be consumed by making
//   their (lost-capable, sufficiently old) allocations lost. *itemsToMakeLostCount,
//   *pSumFreeSize and *pSumItemSize are filled for cost comparison by the caller.
// On success returns true with *pOffset set to the final aligned offset.
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            // Starting item is used: it must itself be losable and old enough.
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7691 
    7692 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7693 {
    7694  VMA_ASSERT(item != m_Suballocations.end());
    7695  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7696 
    7697  VmaSuballocationList::iterator nextItem = item;
    7698  ++nextItem;
    7699  VMA_ASSERT(nextItem != m_Suballocations.end());
    7700  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7701 
    7702  item->size += nextItem->size;
    7703  --m_FreeCount;
    7704  m_Suballocations.erase(nextItem);
    7705 }
    7706 
// Marks the given used suballocation as free, updates totals, merges it with
// adjacent free suballocations when present, and maintains registration in
// m_FreeSuballocationsBySize. Returns an iterator to the resulting (possibly
// merged) free suballocation, which may differ from suballocItem if it was
// merged into the previous one.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // The next item must leave the size-sorted registry before being absorbed,
        // since its entry would become stale.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // The previous item absorbs suballocItem (already merged with next, if any);
        // its size changes, so it must be unregistered and re-registered.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7758 
    7759 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7760 {
    7761  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7762  VMA_ASSERT(item->size > 0);
    7763 
    7764  // You may want to enable this validation at the beginning or at the end of
    7765  // this function, depending on what do you want to check.
    7766  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7767 
    7768  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7769  {
    7770  if(m_FreeSuballocationsBySize.empty())
    7771  {
    7772  m_FreeSuballocationsBySize.push_back(item);
    7773  }
    7774  else
    7775  {
    7776  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7777  }
    7778  }
    7779 
    7780  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7781 }
    7782 
    7783 
// Removes the given free suballocation from m_FreeSuballocationsBySize, if it
// was registered there (i.e. its size reached the registration threshold).
// Asserts if a registered item cannot be found.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary search finds the first entry whose size is not less than
        // item->size; among entries of equal size the exact iterator is then
        // located by linear scan.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // The vector is sorted by size, so once the size no longer matches
            // the item cannot appear later - that would mean corruption.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7816 
    7818 // class VmaBlockMetadata_Linear
    7819 
// Constructs empty linear block metadata. Vector 0 starts as the "1st" vector
// (m_1stVectorIndex == 0); the 2nd vector stays unused (SECOND_VECTOR_EMPTY)
// until ring-buffer or double-stack mode is engaged. All null-item counters
// start at zero; m_SumFreeSize is set properly later, in Init().
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7832 
// Intentionally empty: all members release their resources via their own
// destructors.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7836 
    7837 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
    7838 {
    7839  VmaBlockMetadata::Init(size);
    7840  m_SumFreeSize = size;
    7841 }
    7842 
// Checks internal consistency of the linear metadata: vector/mode agreement,
// null-item accounting, monotonically increasing offsets with debug margins,
// agreement between suballocations and their VmaAllocation handles, and the
// free-size sum. Returns true on success; VMA_VALIDATE returns false from
// this function on the first violated condition.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    // 2nd vector is non-empty exactly when a 2nd-vector mode is active,
    // and ring-buffer mode requires a non-empty 1st vector.
    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // `offset` threads through all passes below: each suballocation must start
    // at or after the previous end plus the debug margin.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lower part of the
    // address space, so it is walked first.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            // A suballocation is free exactly when it has no allocation handle.
            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Leading null items of the 1st vector must all be free placeholders.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        // NOTE(review): i >= m_1stNullItemsBeginCount is always true here given
        // the loop's start index, so this condition is trivially satisfied.
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the end of the
    // block, so it is walked in reverse index order to keep offsets ascending.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7969 
    7970 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7971 {
    7972  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7973  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7974 }
    7975 
    7976 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7977 {
    7978  const VkDeviceSize size = GetSize();
    7979 
    7980  /*
    7981  We don't consider gaps inside allocation vectors with freed allocations because
    7982  they are not suitable for reuse in linear allocator. We consider only space that
    7983  is available for new allocations.
    7984  */
    7985  if(IsEmpty())
    7986  {
    7987  return size;
    7988  }
    7989 
    7990  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7991 
    7992  switch(m_2ndVectorMode)
    7993  {
    7994  case SECOND_VECTOR_EMPTY:
    7995  /*
    7996  Available space is after end of 1st, as well as before beginning of 1st (which
    7997  whould make it a ring buffer).
    7998  */
    7999  {
    8000  const size_t suballocations1stCount = suballocations1st.size();
    8001  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8002  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8003  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8004  return VMA_MAX(
    8005  firstSuballoc.offset,
    8006  size - (lastSuballoc.offset + lastSuballoc.size));
    8007  }
    8008  break;
    8009 
    8010  case SECOND_VECTOR_RING_BUFFER:
    8011  /*
    8012  Available space is only between end of 2nd and beginning of 1st.
    8013  */
    8014  {
    8015  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8016  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8017  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8018  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8019  }
    8020  break;
    8021 
    8022  case SECOND_VECTOR_DOUBLE_STACK:
    8023  /*
    8024  Available space is only between end of 1st and top of 2nd.
    8025  */
    8026  {
    8027  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8028  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8029  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8030  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8031  }
    8032  break;
    8033 
    8034  default:
    8035  VMA_ASSERT(0);
    8036  return 0;
    8037  }
    8038 }
    8039 
    8040 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8041 {
    8042  const VkDeviceSize size = GetSize();
    8043  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8044  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8045  const size_t suballoc1stCount = suballocations1st.size();
    8046  const size_t suballoc2ndCount = suballocations2nd.size();
    8047 
    8048  outInfo.blockCount = 1;
    8049  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8050  outInfo.unusedRangeCount = 0;
    8051  outInfo.usedBytes = 0;
    8052  outInfo.allocationSizeMin = UINT64_MAX;
    8053  outInfo.allocationSizeMax = 0;
    8054  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8055  outInfo.unusedRangeSizeMax = 0;
    8056 
    8057  VkDeviceSize lastOffset = 0;
    8058 
    8059  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8060  {
    8061  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8062  size_t nextAlloc2ndIndex = 0;
    8063  while(lastOffset < freeSpace2ndTo1stEnd)
    8064  {
    8065  // Find next non-null allocation or move nextAllocIndex to the end.
    8066  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8067  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8068  {
    8069  ++nextAlloc2ndIndex;
    8070  }
    8071 
    8072  // Found non-null allocation.
    8073  if(nextAlloc2ndIndex < suballoc2ndCount)
    8074  {
    8075  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8076 
    8077  // 1. Process free space before this allocation.
    8078  if(lastOffset < suballoc.offset)
    8079  {
    8080  // There is free space from lastOffset to suballoc.offset.
    8081  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8082  ++outInfo.unusedRangeCount;
    8083  outInfo.unusedBytes += unusedRangeSize;
    8084  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8085  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8086  }
    8087 
    8088  // 2. Process this allocation.
    8089  // There is allocation with suballoc.offset, suballoc.size.
    8090  outInfo.usedBytes += suballoc.size;
    8091  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8092  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8093 
    8094  // 3. Prepare for next iteration.
    8095  lastOffset = suballoc.offset + suballoc.size;
    8096  ++nextAlloc2ndIndex;
    8097  }
    8098  // We are at the end.
    8099  else
    8100  {
    8101  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8102  if(lastOffset < freeSpace2ndTo1stEnd)
    8103  {
    8104  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8105  ++outInfo.unusedRangeCount;
    8106  outInfo.unusedBytes += unusedRangeSize;
    8107  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8108  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8109  }
    8110 
    8111  // End of loop.
    8112  lastOffset = freeSpace2ndTo1stEnd;
    8113  }
    8114  }
    8115  }
    8116 
    8117  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8118  const VkDeviceSize freeSpace1stTo2ndEnd =
    8119  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8120  while(lastOffset < freeSpace1stTo2ndEnd)
    8121  {
    8122  // Find next non-null allocation or move nextAllocIndex to the end.
    8123  while(nextAlloc1stIndex < suballoc1stCount &&
    8124  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8125  {
    8126  ++nextAlloc1stIndex;
    8127  }
    8128 
    8129  // Found non-null allocation.
    8130  if(nextAlloc1stIndex < suballoc1stCount)
    8131  {
    8132  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8133 
    8134  // 1. Process free space before this allocation.
    8135  if(lastOffset < suballoc.offset)
    8136  {
    8137  // There is free space from lastOffset to suballoc.offset.
    8138  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8139  ++outInfo.unusedRangeCount;
    8140  outInfo.unusedBytes += unusedRangeSize;
    8141  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8142  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8143  }
    8144 
    8145  // 2. Process this allocation.
    8146  // There is allocation with suballoc.offset, suballoc.size.
    8147  outInfo.usedBytes += suballoc.size;
    8148  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8149  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8150 
    8151  // 3. Prepare for next iteration.
    8152  lastOffset = suballoc.offset + suballoc.size;
    8153  ++nextAlloc1stIndex;
    8154  }
    8155  // We are at the end.
    8156  else
    8157  {
    8158  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8159  if(lastOffset < freeSpace1stTo2ndEnd)
    8160  {
    8161  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8162  ++outInfo.unusedRangeCount;
    8163  outInfo.unusedBytes += unusedRangeSize;
    8164  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8165  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8166  }
    8167 
    8168  // End of loop.
    8169  lastOffset = freeSpace1stTo2ndEnd;
    8170  }
    8171  }
    8172 
    8173  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8174  {
    8175  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8176  while(lastOffset < size)
    8177  {
    8178  // Find next non-null allocation or move nextAllocIndex to the end.
    8179  while(nextAlloc2ndIndex != SIZE_MAX &&
    8180  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8181  {
    8182  --nextAlloc2ndIndex;
    8183  }
    8184 
    8185  // Found non-null allocation.
    8186  if(nextAlloc2ndIndex != SIZE_MAX)
    8187  {
    8188  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8189 
    8190  // 1. Process free space before this allocation.
    8191  if(lastOffset < suballoc.offset)
    8192  {
    8193  // There is free space from lastOffset to suballoc.offset.
    8194  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8195  ++outInfo.unusedRangeCount;
    8196  outInfo.unusedBytes += unusedRangeSize;
    8197  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8198  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8199  }
    8200 
    8201  // 2. Process this allocation.
    8202  // There is allocation with suballoc.offset, suballoc.size.
    8203  outInfo.usedBytes += suballoc.size;
    8204  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8205  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8206 
    8207  // 3. Prepare for next iteration.
    8208  lastOffset = suballoc.offset + suballoc.size;
    8209  --nextAlloc2ndIndex;
    8210  }
    8211  // We are at the end.
    8212  else
    8213  {
    8214  // There is free space from lastOffset to size.
    8215  if(lastOffset < size)
    8216  {
    8217  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8218  ++outInfo.unusedRangeCount;
    8219  outInfo.unusedBytes += unusedRangeSize;
    8220  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8221  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8222  }
    8223 
    8224  // End of loop.
    8225  lastOffset = size;
    8226  }
    8227  }
    8228  }
    8229 
    8230  outInfo.unusedBytes = size - outInfo.usedBytes;
    8231 }
    8232 
    8233 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8234 {
    8235  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8236  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8237  const VkDeviceSize size = GetSize();
    8238  const size_t suballoc1stCount = suballocations1st.size();
    8239  const size_t suballoc2ndCount = suballocations2nd.size();
    8240 
    8241  inoutStats.size += size;
    8242 
    8243  VkDeviceSize lastOffset = 0;
    8244 
    8245  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8246  {
    8247  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8248  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8249  while(lastOffset < freeSpace2ndTo1stEnd)
    8250  {
    8251  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8252  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8253  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8254  {
    8255  ++nextAlloc2ndIndex;
    8256  }
    8257 
    8258  // Found non-null allocation.
    8259  if(nextAlloc2ndIndex < suballoc2ndCount)
    8260  {
    8261  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8262 
    8263  // 1. Process free space before this allocation.
    8264  if(lastOffset < suballoc.offset)
    8265  {
    8266  // There is free space from lastOffset to suballoc.offset.
    8267  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8268  inoutStats.unusedSize += unusedRangeSize;
    8269  ++inoutStats.unusedRangeCount;
    8270  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8271  }
    8272 
    8273  // 2. Process this allocation.
    8274  // There is allocation with suballoc.offset, suballoc.size.
    8275  ++inoutStats.allocationCount;
    8276 
    8277  // 3. Prepare for next iteration.
    8278  lastOffset = suballoc.offset + suballoc.size;
    8279  ++nextAlloc2ndIndex;
    8280  }
    8281  // We are at the end.
    8282  else
    8283  {
    8284  if(lastOffset < freeSpace2ndTo1stEnd)
    8285  {
    8286  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8287  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8288  inoutStats.unusedSize += unusedRangeSize;
    8289  ++inoutStats.unusedRangeCount;
    8290  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8291  }
    8292 
    8293  // End of loop.
    8294  lastOffset = freeSpace2ndTo1stEnd;
    8295  }
    8296  }
    8297  }
    8298 
    8299  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8300  const VkDeviceSize freeSpace1stTo2ndEnd =
    8301  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8302  while(lastOffset < freeSpace1stTo2ndEnd)
    8303  {
    8304  // Find next non-null allocation or move nextAllocIndex to the end.
    8305  while(nextAlloc1stIndex < suballoc1stCount &&
    8306  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8307  {
    8308  ++nextAlloc1stIndex;
    8309  }
    8310 
    8311  // Found non-null allocation.
    8312  if(nextAlloc1stIndex < suballoc1stCount)
    8313  {
    8314  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8315 
    8316  // 1. Process free space before this allocation.
    8317  if(lastOffset < suballoc.offset)
    8318  {
    8319  // There is free space from lastOffset to suballoc.offset.
    8320  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8321  inoutStats.unusedSize += unusedRangeSize;
    8322  ++inoutStats.unusedRangeCount;
    8323  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8324  }
    8325 
    8326  // 2. Process this allocation.
    8327  // There is allocation with suballoc.offset, suballoc.size.
    8328  ++inoutStats.allocationCount;
    8329 
    8330  // 3. Prepare for next iteration.
    8331  lastOffset = suballoc.offset + suballoc.size;
    8332  ++nextAlloc1stIndex;
    8333  }
    8334  // We are at the end.
    8335  else
    8336  {
    8337  if(lastOffset < freeSpace1stTo2ndEnd)
    8338  {
    8339  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8340  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8341  inoutStats.unusedSize += unusedRangeSize;
    8342  ++inoutStats.unusedRangeCount;
    8343  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8344  }
    8345 
    8346  // End of loop.
    8347  lastOffset = freeSpace1stTo2ndEnd;
    8348  }
    8349  }
    8350 
    8351  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8352  {
    8353  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8354  while(lastOffset < size)
    8355  {
    8356  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8357  while(nextAlloc2ndIndex != SIZE_MAX &&
    8358  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8359  {
    8360  --nextAlloc2ndIndex;
    8361  }
    8362 
    8363  // Found non-null allocation.
    8364  if(nextAlloc2ndIndex != SIZE_MAX)
    8365  {
    8366  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8367 
    8368  // 1. Process free space before this allocation.
    8369  if(lastOffset < suballoc.offset)
    8370  {
    8371  // There is free space from lastOffset to suballoc.offset.
    8372  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8373  inoutStats.unusedSize += unusedRangeSize;
    8374  ++inoutStats.unusedRangeCount;
    8375  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8376  }
    8377 
    8378  // 2. Process this allocation.
    8379  // There is allocation with suballoc.offset, suballoc.size.
    8380  ++inoutStats.allocationCount;
    8381 
    8382  // 3. Prepare for next iteration.
    8383  lastOffset = suballoc.offset + suballoc.size;
    8384  --nextAlloc2ndIndex;
    8385  }
    8386  // We are at the end.
    8387  else
    8388  {
    8389  if(lastOffset < size)
    8390  {
    8391  // There is free space from lastOffset to size.
    8392  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8393  inoutStats.unusedSize += unusedRangeSize;
    8394  ++inoutStats.unusedRangeCount;
    8395  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8396  }
    8397 
    8398  // End of loop.
    8399  lastOffset = size;
    8400  }
    8401  }
    8402  }
    8403 }
    8404 
    8405 #if VMA_STATS_STRING_ENABLED
    8406 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    8407 {
    8408  const VkDeviceSize size = GetSize();
    8409  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8410  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8411  const size_t suballoc1stCount = suballocations1st.size();
    8412  const size_t suballoc2ndCount = suballocations2nd.size();
    8413 
    8414  // FIRST PASS
    8415 
    8416  size_t unusedRangeCount = 0;
    8417  VkDeviceSize usedBytes = 0;
    8418 
    8419  VkDeviceSize lastOffset = 0;
    8420 
    8421  size_t alloc2ndCount = 0;
    8422  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8423  {
    8424  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8425  size_t nextAlloc2ndIndex = 0;
    8426  while(lastOffset < freeSpace2ndTo1stEnd)
    8427  {
    8428  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8429  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8430  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8431  {
    8432  ++nextAlloc2ndIndex;
    8433  }
    8434 
    8435  // Found non-null allocation.
    8436  if(nextAlloc2ndIndex < suballoc2ndCount)
    8437  {
    8438  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8439 
    8440  // 1. Process free space before this allocation.
    8441  if(lastOffset < suballoc.offset)
    8442  {
    8443  // There is free space from lastOffset to suballoc.offset.
    8444  ++unusedRangeCount;
    8445  }
    8446 
    8447  // 2. Process this allocation.
    8448  // There is allocation with suballoc.offset, suballoc.size.
    8449  ++alloc2ndCount;
    8450  usedBytes += suballoc.size;
    8451 
    8452  // 3. Prepare for next iteration.
    8453  lastOffset = suballoc.offset + suballoc.size;
    8454  ++nextAlloc2ndIndex;
    8455  }
    8456  // We are at the end.
    8457  else
    8458  {
    8459  if(lastOffset < freeSpace2ndTo1stEnd)
    8460  {
    8461  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8462  ++unusedRangeCount;
    8463  }
    8464 
    8465  // End of loop.
    8466  lastOffset = freeSpace2ndTo1stEnd;
    8467  }
    8468  }
    8469  }
    8470 
    8471  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8472  size_t alloc1stCount = 0;
    8473  const VkDeviceSize freeSpace1stTo2ndEnd =
    8474  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8475  while(lastOffset < freeSpace1stTo2ndEnd)
    8476  {
    8477  // Find next non-null allocation or move nextAllocIndex to the end.
    8478  while(nextAlloc1stIndex < suballoc1stCount &&
    8479  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8480  {
    8481  ++nextAlloc1stIndex;
    8482  }
    8483 
    8484  // Found non-null allocation.
    8485  if(nextAlloc1stIndex < suballoc1stCount)
    8486  {
    8487  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8488 
    8489  // 1. Process free space before this allocation.
    8490  if(lastOffset < suballoc.offset)
    8491  {
    8492  // There is free space from lastOffset to suballoc.offset.
    8493  ++unusedRangeCount;
    8494  }
    8495 
    8496  // 2. Process this allocation.
    8497  // There is allocation with suballoc.offset, suballoc.size.
    8498  ++alloc1stCount;
    8499  usedBytes += suballoc.size;
    8500 
    8501  // 3. Prepare for next iteration.
    8502  lastOffset = suballoc.offset + suballoc.size;
    8503  ++nextAlloc1stIndex;
    8504  }
    8505  // We are at the end.
    8506  else
    8507  {
    8508  if(lastOffset < size)
    8509  {
    8510  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8511  ++unusedRangeCount;
    8512  }
    8513 
    8514  // End of loop.
    8515  lastOffset = freeSpace1stTo2ndEnd;
    8516  }
    8517  }
    8518 
    8519  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8520  {
    8521  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8522  while(lastOffset < size)
    8523  {
    8524  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8525  while(nextAlloc2ndIndex != SIZE_MAX &&
    8526  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8527  {
    8528  --nextAlloc2ndIndex;
    8529  }
    8530 
    8531  // Found non-null allocation.
    8532  if(nextAlloc2ndIndex != SIZE_MAX)
    8533  {
    8534  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8535 
    8536  // 1. Process free space before this allocation.
    8537  if(lastOffset < suballoc.offset)
    8538  {
    8539  // There is free space from lastOffset to suballoc.offset.
    8540  ++unusedRangeCount;
    8541  }
    8542 
    8543  // 2. Process this allocation.
    8544  // There is allocation with suballoc.offset, suballoc.size.
    8545  ++alloc2ndCount;
    8546  usedBytes += suballoc.size;
    8547 
    8548  // 3. Prepare for next iteration.
    8549  lastOffset = suballoc.offset + suballoc.size;
    8550  --nextAlloc2ndIndex;
    8551  }
    8552  // We are at the end.
    8553  else
    8554  {
    8555  if(lastOffset < size)
    8556  {
    8557  // There is free space from lastOffset to size.
    8558  ++unusedRangeCount;
    8559  }
    8560 
    8561  // End of loop.
    8562  lastOffset = size;
    8563  }
    8564  }
    8565  }
    8566 
    8567  const VkDeviceSize unusedBytes = size - usedBytes;
    8568  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    8569 
    8570  // SECOND PASS
    8571  lastOffset = 0;
    8572 
    8573  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8574  {
    8575  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8576  size_t nextAlloc2ndIndex = 0;
    8577  while(lastOffset < freeSpace2ndTo1stEnd)
    8578  {
    8579  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8580  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8581  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8582  {
    8583  ++nextAlloc2ndIndex;
    8584  }
    8585 
    8586  // Found non-null allocation.
    8587  if(nextAlloc2ndIndex < suballoc2ndCount)
    8588  {
    8589  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8590 
    8591  // 1. Process free space before this allocation.
    8592  if(lastOffset < suballoc.offset)
    8593  {
    8594  // There is free space from lastOffset to suballoc.offset.
    8595  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8596  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8597  }
    8598 
    8599  // 2. Process this allocation.
    8600  // There is allocation with suballoc.offset, suballoc.size.
    8601  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8602 
    8603  // 3. Prepare for next iteration.
    8604  lastOffset = suballoc.offset + suballoc.size;
    8605  ++nextAlloc2ndIndex;
    8606  }
    8607  // We are at the end.
    8608  else
    8609  {
    8610  if(lastOffset < freeSpace2ndTo1stEnd)
    8611  {
    8612  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8613  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8614  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8615  }
    8616 
    8617  // End of loop.
    8618  lastOffset = freeSpace2ndTo1stEnd;
    8619  }
    8620  }
    8621  }
    8622 
    8623  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8624  while(lastOffset < freeSpace1stTo2ndEnd)
    8625  {
    8626  // Find next non-null allocation or move nextAllocIndex to the end.
    8627  while(nextAlloc1stIndex < suballoc1stCount &&
    8628  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8629  {
    8630  ++nextAlloc1stIndex;
    8631  }
    8632 
    8633  // Found non-null allocation.
    8634  if(nextAlloc1stIndex < suballoc1stCount)
    8635  {
    8636  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8637 
    8638  // 1. Process free space before this allocation.
    8639  if(lastOffset < suballoc.offset)
    8640  {
    8641  // There is free space from lastOffset to suballoc.offset.
    8642  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8643  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8644  }
    8645 
    8646  // 2. Process this allocation.
    8647  // There is allocation with suballoc.offset, suballoc.size.
    8648  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8649 
    8650  // 3. Prepare for next iteration.
    8651  lastOffset = suballoc.offset + suballoc.size;
    8652  ++nextAlloc1stIndex;
    8653  }
    8654  // We are at the end.
    8655  else
    8656  {
    8657  if(lastOffset < freeSpace1stTo2ndEnd)
    8658  {
    8659  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8660  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8661  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8662  }
    8663 
    8664  // End of loop.
    8665  lastOffset = freeSpace1stTo2ndEnd;
    8666  }
    8667  }
    8668 
    8669  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8670  {
    8671  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8672  while(lastOffset < size)
    8673  {
    8674  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8675  while(nextAlloc2ndIndex != SIZE_MAX &&
    8676  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8677  {
    8678  --nextAlloc2ndIndex;
    8679  }
    8680 
    8681  // Found non-null allocation.
    8682  if(nextAlloc2ndIndex != SIZE_MAX)
    8683  {
    8684  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8685 
    8686  // 1. Process free space before this allocation.
    8687  if(lastOffset < suballoc.offset)
    8688  {
    8689  // There is free space from lastOffset to suballoc.offset.
    8690  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8691  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8692  }
    8693 
    8694  // 2. Process this allocation.
    8695  // There is allocation with suballoc.offset, suballoc.size.
    8696  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    8697 
    8698  // 3. Prepare for next iteration.
    8699  lastOffset = suballoc.offset + suballoc.size;
    8700  --nextAlloc2ndIndex;
    8701  }
    8702  // We are at the end.
    8703  else
    8704  {
    8705  if(lastOffset < size)
    8706  {
    8707  // There is free space from lastOffset to size.
    8708  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8709  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    8710  }
    8711 
    8712  // End of loop.
    8713  lastOffset = size;
    8714  }
    8715  }
    8716  }
    8717 
    8718  PrintDetailedMap_End(json);
    8719 }
    8720 #endif // #if VMA_STATS_STRING_ENABLED
    8721 
/*
Tries to find a place for a new allocation of given size and alignment,
WITHOUT modifying any state of this metadata object.

On success fills *pAllocationRequest (offset, sumFreeSize, sumItemSize,
itemsToMakeLostCount) and returns true; the request is later committed by
Alloc(). Returns false if the allocation cannot be made.

Three placement strategies, depending on mode:
- upperAddress == true: allocate downwards from the top of the block
  (2nd vector used as the upper part of a double stack).
- otherwise, first try appending at the end of the 1st vector.
- otherwise, wrap around to the low end of the block (2nd vector used as a
  ring buffer), optionally making old allocations at the beginning of the
  1st vector "lost" when canMakeOtherLost is true.

`strategy` is accepted for interface compatibility; this linear algorithm
has only one placement choice per mode, so it is not consulted here.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Double-stack and ring-buffer usage are mutually exclusive for the 2nd vector.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            // Not enough room below the current top of the upper stack.
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end (growing downwards, so subtract).
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment (downwards, since this allocation grows from the top).
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Iterate the upper stack from its top (back) downwards in address order.
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                // Align down to a granularity page boundary to avoid sharing a page
                // with a conflicting resource type.
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space between the end of the 1st vector and the
        // chosen offset.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free bytes consumed: from end of 1st vector up to the top of the new
            // allocation (resultBaseOffset + allocSize).
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // Free space ends at the bottom of the upper stack (if any) or at the
            // end of the block.
            // NOTE(review): in DOUBLE_STACK mode suballocations2nd is assumed
            // non-empty here (back() is dereferenced) — invariant maintained by
            // CleanupAfterFree() resetting the mode when 2nd empties; confirm.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            // Wrapping around only makes sense when there is something in 1st to
            // wrap in front of.
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Walk allocations at the beginning of 1st that collide with the
                // new allocation's range and count those that can be made lost.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            // Colliding allocation cannot be made lost - fail.
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            // NOTE(review): strict '<' against `size` when index1st reached the end,
            // vs '<=' against the next suballocation's offset — confirm the
            // asymmetry is intentional.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                // Net free space gained: up to the first surviving 1st-vector item
                // (or block end), minus the bytes of items that will be made lost.
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    return false;
}
    9094 
    9095 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    9096  uint32_t currentFrameIndex,
    9097  uint32_t frameInUseCount,
    9098  VmaAllocationRequest* pAllocationRequest)
    9099 {
    9100  if(pAllocationRequest->itemsToMakeLostCount == 0)
    9101  {
    9102  return true;
    9103  }
    9104 
    9105  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    9106 
    9107  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9108  size_t index1st = m_1stNullItemsBeginCount;
    9109  size_t madeLostCount = 0;
    9110  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    9111  {
    9112  VMA_ASSERT(index1st < suballocations1st.size());
    9113  VmaSuballocation& suballoc = suballocations1st[index1st];
    9114  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9115  {
    9116  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9117  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    9118  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9119  {
    9120  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9121  suballoc.hAllocation = VK_NULL_HANDLE;
    9122  m_SumFreeSize += suballoc.size;
    9123  ++m_1stNullItemsMiddleCount;
    9124  ++madeLostCount;
    9125  }
    9126  else
    9127  {
    9128  return false;
    9129  }
    9130  }
    9131  ++index1st;
    9132  }
    9133 
    9134  CleanupAfterFree();
    9135  //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().
    9136 
    9137  return true;
    9138 }
    9139 
    9140 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9141 {
    9142  uint32_t lostAllocationCount = 0;
    9143 
    9144  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9145  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9146  {
    9147  VmaSuballocation& suballoc = suballocations1st[i];
    9148  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9149  suballoc.hAllocation->CanBecomeLost() &&
    9150  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9151  {
    9152  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9153  suballoc.hAllocation = VK_NULL_HANDLE;
    9154  ++m_1stNullItemsMiddleCount;
    9155  m_SumFreeSize += suballoc.size;
    9156  ++lostAllocationCount;
    9157  }
    9158  }
    9159 
    9160  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9161  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9162  {
    9163  VmaSuballocation& suballoc = suballocations2nd[i];
    9164  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9165  suballoc.hAllocation->CanBecomeLost() &&
    9166  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9167  {
    9168  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9169  suballoc.hAllocation = VK_NULL_HANDLE;
    9170  ++m_2ndNullItemsCount;
    9171  ++lostAllocationCount;
    9172  }
    9173  }
    9174 
    9175  if(lostAllocationCount)
    9176  {
    9177  CleanupAfterFree();
    9178  }
    9179 
    9180  return lostAllocationCount;
    9181 }
    9182 
    9183 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9184 {
    9185  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9186  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9187  {
    9188  const VmaSuballocation& suballoc = suballocations1st[i];
    9189  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9190  {
    9191  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9192  {
    9193  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9194  return VK_ERROR_VALIDATION_FAILED_EXT;
    9195  }
    9196  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9197  {
    9198  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9199  return VK_ERROR_VALIDATION_FAILED_EXT;
    9200  }
    9201  }
    9202  }
    9203 
    9204  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9205  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9206  {
    9207  const VmaSuballocation& suballoc = suballocations2nd[i];
    9208  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9209  {
    9210  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9211  {
    9212  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9213  return VK_ERROR_VALIDATION_FAILED_EXT;
    9214  }
    9215  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9216  {
    9217  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9218  return VK_ERROR_VALIDATION_FAILED_EXT;
    9219  }
    9220  }
    9221  }
    9222 
    9223  return VK_SUCCESS;
    9224 }
    9225 
/*
Commits an allocation request previously produced by CreateAllocationRequest():
registers the new suballocation in the appropriate vector, transitions the
2nd-vector mode if needed, and updates the free-size accounting.
*/
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocation goes to the 2nd vector, which from now on
        // acts as the upper part of a double stack.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // The request matches neither the end of 1st nor the ring-buffer
                // wrap-around position - it must be stale or corrupted.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    // The allocated bytes are no longer free.
    m_SumFreeSize -= newSuballoc.size;
}
    9295 
    9296 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9297 {
    9298  FreeAtOffset(allocation->GetOffset());
    9299 }
    9300 
/*
Frees the suballocation that starts at `offset`. Fast paths handle the
oldest item of the 1st vector and the last items of either vector; otherwise
a binary search locates the item in the middle. Always updates m_SumFreeSize
and calls CleanupAfterFree() to restore invariants. Asserts if no
suballocation with this offset exists.
*/
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        // NOTE(review): indexing [m_1stNullItemsBeginCount] assumes at least one
        // live item follows the leading null run whenever 1st is non-empty —
        // invariant apparently maintained by CleanupAfterFree(); confirm.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        // Search key: only the offset participates in the comparison.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Keep the slot as a free hole; CleanupAfterFree() compacts later.
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Ring buffer is sorted by ascending offset, double stack by descending,
        // hence the different comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9389 
    9390 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9391 {
    9392  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9393  const size_t suballocCount = AccessSuballocations1st().size();
    9394  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9395 }
    9396 
// Called after freeing a suballocation: trims and optionally compacts the two
// suballocation vectors so that null (freed) entries do not accumulate
// unboundedly. Statement order matters throughout — counters and vector sizes
// are updated in lockstep.
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No live allocations left: reset everything to the initial state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // Nulls that became contiguous with the leading-null run are
        // reclassified from "middle" to "begin".
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        // Trailing nulls can simply be popped off.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Slide all live items to the front of the 1st vector, skipping
            // null entries, then shrink the vector to live items only.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // The former 2nd vector's null counters carry over, and its
                // leading nulls become the new "begin" run.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9493 
    9494 
    9496 // class VmaBlockMetadata_Buddy
    9497 
// Buddy-allocator metadata. A fresh block consists of a single free node
// spanning the whole usable size, hence m_FreeCount starts at 1. The free
// lists are zeroed here and the root node is created later in Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9507 
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    // Recursively deletes the whole node tree, children of split nodes included.
    DeleteNode(m_Root);
}
    9512 
// Initializes metadata for a block of the given total size. The buddy
// allocator can only manage a power-of-2 region, so the usable size is the
// largest power of 2 not exceeding `size`; the remainder is the "unusable"
// tail reported by GetUnusableSize() elsewhere.
void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    // Calculate m_LevelCount: one level per halving, capped at MAX_LEVELS,
    // stopping before the node size would drop below MIN_NODE_SIZE.
    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }

    // The whole usable region starts as a single free root node at level 0.
    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    rootNode->offset = 0;
    rootNode->type = Node::TYPE_FREE;
    rootNode->parent = VMA_NULL;
    rootNode->buddy = VMA_NULL;

    m_Root = rootNode;
    AddToFreeListFront(0, rootNode);
}
    9537 
// Consistency check for the buddy metadata: validates the node tree, the
// aggregate counters, and the doubly-linked free list at every level.
// Returns true when consistent; VMA_VALIDATE reports and bails out otherwise.
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Counters recomputed from the tree must match the cached ones.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists: prev/next links must be mutually consistent
    // and the back pointer must reference the last node.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9580 
    9581 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9582 {
    9583  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9584  {
    9585  if(m_FreeList[level].front != VMA_NULL)
    9586  {
    9587  return LevelToNodeSize(level);
    9588  }
    9589  }
    9590  return 0;
    9591 }
    9592 
// Fills outInfo with statistics for this single block by walking the node
// tree, then accounts for the unusable tail (the non-power-of-2 remainder)
// as one extra unused range.
void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    outInfo.blockCount = 1;

    outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    outInfo.usedBytes = outInfo.unusedBytes = 0;

    // Seed minima with the largest value so the first comparison wins.
    outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.

    CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));

    if(unusableSize > 0)
    {
        ++outInfo.unusedRangeCount;
        outInfo.unusedBytes += unusableSize;
        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    }
}
    9616 
// Accumulates this block's statistics into pool-level totals.
// The unusable tail counts as unused size and one extra unused range.
void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const VkDeviceSize unusableSize = GetUnusableSize();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    inoutStats.allocationCount += m_AllocationCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());

    if(unusableSize > 0)
    {
        ++inoutStats.unusedRangeCount;
        // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    }
}
    9633 
    9634 #if VMA_STATS_STRING_ENABLED
    9635 
// Writes a JSON description of the whole block: summary statistics first,
// then every node of the tree, then the unusable tail (if any).
void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
{
    // TODO optimize
    // The stats are recomputed from scratch here only to fill the header.
    VmaStatInfo stat;
    CalcAllocationStatInfo(stat);

    PrintDetailedMap_Begin(
        json,
        stat.unusedBytes,
        stat.allocationCount,
        stat.unusedRangeCount);

    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));

    const VkDeviceSize unusableSize = GetUnusableSize();
    if(unusableSize > 0)
    {
        PrintDetailedMap_UnusedRange(json,
            m_UsableSize, // offset
            unusableSize); // size
    }

    PrintDetailedMap_End(json);
}
    9660 
    9661 #endif // #if VMA_STATS_STRING_ENABLED
    9662 
// Tries to find a free node that can hold the requested allocation.
// On success fills *pAllocationRequest (stashing the found level in
// customData for Alloc()) and returns true; returns false when no fitting,
// suitably aligned free node exists.
bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");

    // Simple way to respect bufferImageGranularity. May be optimized some day.
    // Whenever it might be an OPTIMAL image...
    if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    {
        allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
        allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    }

    if(allocSize > m_UsableSize)
    {
        return false;
    }

    // Search from the best-fit level (smallest node that fits) toward level 0
    // (larger nodes); take the first free node whose offset is aligned.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    for(uint32_t level = targetLevel + 1; level--; )
    {
        for(Node* freeNode = m_FreeList[level].front;
            freeNode != VMA_NULL;
            freeNode = freeNode->free.next)
        {
            if(freeNode->offset % allocAlignment == 0)
            {
                pAllocationRequest->offset = freeNode->offset;
                pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
                pAllocationRequest->sumItemSize = 0;
                pAllocationRequest->itemsToMakeLostCount = 0;
                // Alloc() re-finds the node using this level and the offset.
                pAllocationRequest->customData = (void*)(uintptr_t)level;
                return true;
            }
        }
    }

    return false;
}
    9713 
    9714 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9715  uint32_t currentFrameIndex,
    9716  uint32_t frameInUseCount,
    9717  VmaAllocationRequest* pAllocationRequest)
    9718 {
    9719  /*
    9720  Lost allocations are not supported in buddy allocator at the moment.
    9721  Support might be added in the future.
    9722  */
    9723  return pAllocationRequest->itemsToMakeLostCount == 0;
    9724 }
    9725 
// Returns the number of allocations made lost; always 0 here.
uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    /*
    Lost allocations are not supported in buddy allocator at the moment.
    Support might be added in the future.
    */
    return 0;
}
    9734 
// Commits an allocation previously found by CreateAllocationRequest().
// Re-locates the chosen free node (request.customData holds its level,
// request.offset its position), splits it down to the best-fit level, and
// converts the final node into an allocation node.
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Find the free node with the requested offset in that level's free list.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Left must end up at the front so the descent below picks it.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9809 
    9810 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9811 {
    9812  if(node->type == Node::TYPE_SPLIT)
    9813  {
    9814  DeleteNode(node->split.leftChild->buddy);
    9815  DeleteNode(node->split.leftChild);
    9816  }
    9817 
    9818  vma_delete(GetAllocationCallbacks(), node);
    9819 }
    9820 
    9821 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9822 {
    9823  VMA_VALIDATE(level < m_LevelCount);
    9824  VMA_VALIDATE(curr->parent == parent);
    9825  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9826  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9827  switch(curr->type)
    9828  {
    9829  case Node::TYPE_FREE:
    9830  // curr->free.prev, next are validated separately.
    9831  ctx.calculatedSumFreeSize += levelNodeSize;
    9832  ++ctx.calculatedFreeCount;
    9833  break;
    9834  case Node::TYPE_ALLOCATION:
    9835  ++ctx.calculatedAllocationCount;
    9836  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9837  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9838  break;
    9839  case Node::TYPE_SPLIT:
    9840  {
    9841  const uint32_t childrenLevel = level + 1;
    9842  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9843  const Node* const leftChild = curr->split.leftChild;
    9844  VMA_VALIDATE(leftChild != VMA_NULL);
    9845  VMA_VALIDATE(leftChild->offset == curr->offset);
    9846  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9847  {
    9848  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9849  }
    9850  const Node* const rightChild = leftChild->buddy;
    9851  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9852  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9853  {
    9854  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9855  }
    9856  }
    9857  break;
    9858  default:
    9859  return false;
    9860  }
    9861 
    9862  return true;
    9863 }
    9864 
    9865 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9866 {
    9867  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9868  uint32_t level = 0;
    9869  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9870  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9871  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9872  {
    9873  ++level;
    9874  currLevelNodeSize = nextLevelNodeSize;
    9875  nextLevelNodeSize = currLevelNodeSize >> 1;
    9876  }
    9877  return level;
    9878 }
    9879 
// Frees the allocation at the given offset: descends the tree to the leaf
// allocation node, turns it free, then repeatedly merges it with its buddy
// while the buddy is also free, collapsing split parents back together.
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            // Offset lies in the left half.
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    // NOTE(review): the assert above permits alloc == VK_NULL_HANDLE, yet
    // alloc->GetSize() below dereferences it unconditionally — confirm that
    // callers never actually pass a null handle here.

    ++m_FreeCount;
    --m_AllocationCount;
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
    9930 
    9931 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9932 {
    9933  switch(node->type)
    9934  {
    9935  case Node::TYPE_FREE:
    9936  ++outInfo.unusedRangeCount;
    9937  outInfo.unusedBytes += levelNodeSize;
    9938  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9939  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9940  break;
    9941  case Node::TYPE_ALLOCATION:
    9942  {
    9943  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9944  ++outInfo.allocationCount;
    9945  outInfo.usedBytes += allocSize;
    9946  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9947  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9948 
    9949  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9950  if(unusedRangeSize > 0)
    9951  {
    9952  ++outInfo.unusedRangeCount;
    9953  outInfo.unusedBytes += unusedRangeSize;
    9954  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9955  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9956  }
    9957  }
    9958  break;
    9959  case Node::TYPE_SPLIT:
    9960  {
    9961  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9962  const Node* const leftChild = node->split.leftChild;
    9963  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9964  const Node* const rightChild = leftChild->buddy;
    9965  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9966  }
    9967  break;
    9968  default:
    9969  VMA_ASSERT(0);
    9970  }
    9971 }
    9972 
    9973 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9974 {
    9975  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9976 
    9977  // List is empty.
    9978  Node* const frontNode = m_FreeList[level].front;
    9979  if(frontNode == VMA_NULL)
    9980  {
    9981  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9982  node->free.prev = node->free.next = VMA_NULL;
    9983  m_FreeList[level].front = m_FreeList[level].back = node;
    9984  }
    9985  else
    9986  {
    9987  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9988  node->free.prev = VMA_NULL;
    9989  node->free.next = frontNode;
    9990  frontNode->free.prev = node;
    9991  m_FreeList[level].front = node;
    9992  }
    9993 }
    9994 
    9995 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9996 {
    9997  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9998 
    9999  // It is at the front.
    10000  if(node->free.prev == VMA_NULL)
    10001  {
    10002  VMA_ASSERT(m_FreeList[level].front == node);
    10003  m_FreeList[level].front = node->free.next;
    10004  }
    10005  else
    10006  {
    10007  Node* const prevFreeNode = node->free.prev;
    10008  VMA_ASSERT(prevFreeNode->free.next == node);
    10009  prevFreeNode->free.next = node->free.next;
    10010  }
    10011 
    10012  // It is at the back.
    10013  if(node->free.next == VMA_NULL)
    10014  {
    10015  VMA_ASSERT(m_FreeList[level].back == node);
    10016  m_FreeList[level].back = node->free.prev;
    10017  }
    10018  else
    10019  {
    10020  Node* const nextFreeNode = node->free.next;
    10021  VMA_ASSERT(nextFreeNode->free.prev == node);
    10022  nextFreeNode->free.prev = node->free.prev;
    10023  }
    10024 }
    10025 
    10026 #if VMA_STATS_STRING_ENABLED
// Recursively writes one node of the buddy tree to the JSON map: free nodes
// as unused ranges, allocation nodes as allocations (plus their internal
// fragmentation as an unused range), split nodes by recursing into children.
void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
{
    switch(node->type)
    {
    case Node::TYPE_FREE:
        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
        break;
    case Node::TYPE_ALLOCATION:
        {
            PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
            if(allocSize < levelNodeSize)
            {
                // Tail of the node not covered by the allocation.
                PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
            }
        }
        break;
    case Node::TYPE_SPLIT:
        {
            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
            const Node* const leftChild = node->split.leftChild;
            PrintDetailedMapNode(json, leftChild, childrenNodeSize);
            const Node* const rightChild = leftChild->buddy;
            PrintDetailedMapNode(json, rightChild, childrenNodeSize);
        }
        break;
    default:
        VMA_ASSERT(0);
    }
}
    10057 #endif // #if VMA_STATS_STRING_ENABLED
    10058 
    10059 
    10061 // class VmaDeviceMemoryBlock
    10062 
// A VmaDeviceMemoryBlock wraps one VkDeviceMemory object; all fields start
// empty/invalid and are filled in by Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    10072 
    10073 void VmaDeviceMemoryBlock::Init(
    10074  VmaAllocator hAllocator,
    10075  uint32_t newMemoryTypeIndex,
    10076  VkDeviceMemory newMemory,
    10077  VkDeviceSize newSize,
    10078  uint32_t id,
    10079  uint32_t algorithm)
    10080 {
    10081  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10082 
    10083  m_MemoryTypeIndex = newMemoryTypeIndex;
    10084  m_Id = id;
    10085  m_hMemory = newMemory;
    10086 
    10087  switch(algorithm)
    10088  {
    10090  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10091  break;
    10093  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10094  break;
    10095  default:
    10096  VMA_ASSERT(0);
    10097  // Fall-through.
    10098  case 0:
    10099  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10100  }
    10101  m_pMetadata->Init(newSize);
    10102 }
    10103 
// Releases the underlying VkDeviceMemory and the metadata object.
// Must only be called when no allocations remain in the block.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    10117 
    10118 bool VmaDeviceMemoryBlock::Validate() const
    10119 {
    10120  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10121  (m_pMetadata->GetSize() != 0));
    10122 
    10123  return m_pMetadata->Validate();
    10124 }
    10125 
    10126 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10127 {
    10128  void* pData = nullptr;
    10129  VkResult res = Map(hAllocator, 1, &pData);
    10130  if(res != VK_SUCCESS)
    10131  {
    10132  return res;
    10133  }
    10134 
    10135  res = m_pMetadata->CheckCorruption(pData);
    10136 
    10137  Unmap(hAllocator, 1);
    10138 
    10139  return res;
    10140 }
    10141 
// Maps the block's VkDeviceMemory into host address space with reference
// counting: nested Map() calls reuse the existing mapping and only increase
// m_MapCount. `count` lets one call acquire several references at once
// (count == 0 is a no-op). On success, *ppData (if not null) receives the
// mapped pointer.
VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
{
    if(count == 0)
    {
        return VK_SUCCESS;
    }

    // Lock so vkMapMemory/vkUnmapMemory are never called concurrently on the
    // same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    if(m_MapCount != 0)
    {
        // Already mapped: add references and return the cached pointer.
        m_MapCount += count;
        VMA_ASSERT(m_pMappedData != VMA_NULL);
        if(ppData != VMA_NULL)
        {
            *ppData = m_pMappedData;
        }
        return VK_SUCCESS;
    }
    else
    {
        // First mapping: map the whole memory object once.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            &m_pMappedData);
        if(result == VK_SUCCESS)
        {
            if(ppData != VMA_NULL)
            {
                *ppData = m_pMappedData;
            }
            m_MapCount = count;
        }
        return result;
    }
}
    10180 
    10181 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10182 {
    10183  if(count == 0)
    10184  {
    10185  return;
    10186  }
    10187 
    10188  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10189  if(m_MapCount >= count)
    10190  {
    10191  m_MapCount -= count;
    10192  if(m_MapCount == 0)
    10193  {
    10194  m_pMappedData = VMA_NULL;
    10195  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10196  }
    10197  }
    10198  else
    10199  {
    10200  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10201  }
    10202 }
    10203 
    10204 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10205 {
    10206  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10207  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10208 
    10209  void* pData;
    10210  VkResult res = Map(hAllocator, 1, &pData);
    10211  if(res != VK_SUCCESS)
    10212  {
    10213  return res;
    10214  }
    10215 
    10216  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10217  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10218 
    10219  Unmap(hAllocator, 1);
    10220 
    10221  return VK_SUCCESS;
    10222 }
    10223 
    10224 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10225 {
    10226  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10227  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10228 
    10229  void* pData;
    10230  VkResult res = Map(hAllocator, 1, &pData);
    10231  if(res != VK_SUCCESS)
    10232  {
    10233  return res;
    10234  }
    10235 
    10236  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10237  {
    10238  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10239  }
    10240  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10241  {
    10242  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10243  }
    10244 
    10245  Unmap(hAllocator, 1);
    10246 
    10247  return VK_SUCCESS;
    10248 }
    10249 
    10250 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10251  const VmaAllocator hAllocator,
    10252  const VmaAllocation hAllocation,
    10253  VkBuffer hBuffer)
    10254 {
    10255  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10256  hAllocation->GetBlock() == this);
    10257  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10258  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10259  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10260  hAllocator->m_hDevice,
    10261  hBuffer,
    10262  m_hMemory,
    10263  hAllocation->GetOffset());
    10264 }
    10265 
    10266 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10267  const VmaAllocator hAllocator,
    10268  const VmaAllocation hAllocation,
    10269  VkImage hImage)
    10270 {
    10271  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10272  hAllocation->GetBlock() == this);
    10273  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10274  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10275  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10276  hAllocator->m_hDevice,
    10277  hImage,
    10278  m_hMemory,
    10279  hAllocation->GetOffset());
    10280 }
    10281 
    10282 static void InitStatInfo(VmaStatInfo& outInfo)
    10283 {
    10284  memset(&outInfo, 0, sizeof(outInfo));
    10285  outInfo.allocationSizeMin = UINT64_MAX;
    10286  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10287 }
    10288 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counters and byte totals are summed; extremal fields are combined with
// VMA_MIN / VMA_MAX. Averages are not touched — see VmaPostprocessCalcStatInfo.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
    10302 
    10303 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10304 {
    10305  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10306  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10307  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10308  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10309 }
    10310 
// Constructs a custom pool. All real work happens in the m_BlockVector member
// initializer; the constructor body is intentionally empty.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // blockSize == 0 means "use the allocator's preferred block size".
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        // Buffer/image granularity can be ignored per-pool via a creation flag.
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10329 
// Nothing to do explicitly: the m_BlockVector member's destructor performs
// the actual cleanup of the pool's memory blocks.
VmaPool_T::~VmaPool_T()
{
}
    10333 
    10334 #if VMA_STATS_STRING_ENABLED
    10335 
    10336 #endif // #if VMA_STATS_STRING_ENABLED
    10337 
// Constructs a block vector for a single Vulkan memory type. This only stores
// configuration; no VkDeviceMemory is allocated here (see CreateMinBlocks()
// and CreateBlock()).
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    // Block pointers are stored in memory obtained through the allocator's own
    // allocation callbacks.
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10365 
    10366 VmaBlockVector::~VmaBlockVector()
    10367 {
    10368  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10369 
    10370  for(size_t i = m_Blocks.size(); i--; )
    10371  {
    10372  m_Blocks[i]->Destroy(m_hAllocator);
    10373  vma_delete(m_hAllocator, m_Blocks[i]);
    10374  }
    10375 }
    10376 
    10377 VkResult VmaBlockVector::CreateMinBlocks()
    10378 {
    10379  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10380  {
    10381  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10382  if(res != VK_SUCCESS)
    10383  {
    10384  return res;
    10385  }
    10386  }
    10387  return VK_SUCCESS;
    10388 }
    10389 
    10390 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10391 {
    10392  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10393 
    10394  const size_t blockCount = m_Blocks.size();
    10395 
    10396  pStats->size = 0;
    10397  pStats->unusedSize = 0;
    10398  pStats->allocationCount = 0;
    10399  pStats->unusedRangeCount = 0;
    10400  pStats->unusedRangeSizeMax = 0;
    10401  pStats->blockCount = blockCount;
    10402 
    10403  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10404  {
    10405  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10406  VMA_ASSERT(pBlock);
    10407  VMA_HEAVY_ASSERT(pBlock->Validate());
    10408  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10409  }
    10410 }
    10411 
    10412 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10413 {
    10414  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10415  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10416  (VMA_DEBUG_MARGIN > 0) &&
    10417  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10418 }
    10419 
// Upper bound on retries in VmaBlockVector::Allocate() when other threads keep
// touching allocations we are trying to make lost.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10421 
/*
Allocates `size` bytes with `alignment` from this block vector.

Outline:
  1. Try to sub-allocate from an existing block, without making anything lost
     (linear algorithm: last block only; otherwise forward or backward scan
     depending on strategy).
  2. If allowed, create a new VkDeviceMemory block (shrinking 1/2, 1/4, 1/8 of
     the preferred size as needed) and allocate from it.
  3. With VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT, retry while making
     stale allocations lost, up to VMA_ALLOCATION_TRY_COUNT attempts.

NOTE(review): several lines appear to be MISSING from this extraction (marked
inline below); this text will not compile verbatim - compare against the
upstream file before editing.
*/
VkResult VmaBlockVector::Allocate(
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaSuballocationType suballocType,
    VmaAllocation* pAllocation)
{
    const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    // New blocks may be created unless the caller forbade it or the block
    // count limit has been reached.
    const bool canCreateNewBlock =
        ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
        (m_Blocks.size() < m_MaxBlockCount);
    uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;

    // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    // Which in turn is available only when maxBlockCount = 1.
    if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    {
        canMakeOtherLost = false;
    }

    // Upper address can only be used with linear allocator and within single memory block.
    if(isUpperAddress &&
        (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    {
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Validate strategy.
    // NOTE(review): the case labels for the nonzero strategy bits are missing
    // here (extraction gap) - only `case 0` and `default` survived, leaving a
    // stray second `break;`.
    switch(strategy)
    {
    case 0:
        break;
        break;
    default:
        return VK_ERROR_FEATURE_NOT_PRESENT;
    }

    // Early reject: requested allocation size is larger that maximum block size for this block vector.
    if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    /*
    Under certain condition, this whole section can be skipped for optimization, so
    we move on directly to trying to allocate with canMakeOtherLost. That's the case
    e.g. for custom pools with linear algorithm.
    */
    if(!canMakeOtherLost || canCreateNewBlock)
    {
        // 1. Search existing allocations. Try to allocate without making other allocations lost.
        VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
        // NOTE(review): a statement adjusting allocFlagsCopy appears to be
        // missing here (extraction gap).

        if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
        {
            // Use only last block.
            if(!m_Blocks.empty())
            {
                VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
                VMA_ASSERT(pCurrBlock);
                VkResult res = AllocateFromBlock(
                    pCurrBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
                    return VK_SUCCESS;
                }
            }
        }
        else
        {
            // NOTE(review): the `if(strategy == ...)` guard that selects
            // between the two scan directions is missing here (extraction
            // gap), which is why the `else` below appears unmatched.
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VkResult res = AllocateFromBlock(
                        pCurrBlock,
                        hCurrentPool,
                        currentFrameIndex,
                        size,
                        alignment,
                        allocFlagsCopy,
                        createInfo.pUserData,
                        suballocType,
                        strategy,
                        pAllocation);
                    if(res == VK_SUCCESS)
                    {
                        VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
                        return VK_SUCCESS;
                    }
                }
            }
        }

        // 2. Try to create new block.
        if(canCreateNewBlock)
        {
            // Calculate optimal size for new block.
            VkDeviceSize newBlockSize = m_PreferredBlockSize;
            uint32_t newBlockSizeShift = 0;
            const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;

            if(!m_ExplicitBlockSize)
            {
                // Allocate 1/8, 1/4, 1/2 as first blocks.
                const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
                for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    // Shrink only while still bigger than any existing block
                    // and leaving at least 2x headroom for the request.
                    if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                    }
                    else
                    {
                        break;
                    }
                }
            }

            size_t newBlockIndex = 0;
            VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
            // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
            if(!m_ExplicitBlockSize)
            {
                while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
                {
                    const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
                    if(smallerNewBlockSize >= size)
                    {
                        newBlockSize = smallerNewBlockSize;
                        ++newBlockSizeShift;
                        res = CreateBlock(newBlockSize, &newBlockIndex);
                    }
                    else
                    {
                        break;
                    }
                }
            }

            if(res == VK_SUCCESS)
            {
                VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
                VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);

                res = AllocateFromBlock(
                    pBlock,
                    hCurrentPool,
                    currentFrameIndex,
                    size,
                    alignment,
                    allocFlagsCopy,
                    createInfo.pUserData,
                    suballocType,
                    strategy,
                    pAllocation);
                if(res == VK_SUCCESS)
                {
                    VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
                    return VK_SUCCESS;
                }
                else
                {
                    // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
            }
        }
    }

    // 3. Try to allocate from existing blocks with making other allocations lost.
    if(canMakeOtherLost)
    {
        uint32_t tryIndex = 0;
        for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
        {
            VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
            VmaAllocationRequest bestRequest = {};
            VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;

            // 1. Search existing allocations.
            // NOTE(review): the `if(strategy == ...)` guard selecting the scan
            // direction is missing here (extraction gap).
            {
                // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
                for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        // Cost measures how much would have to be made lost;
                        // keep the cheapest candidate.
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost)
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            if(bestRequestCost == 0)
                            {
                                break;
                            }
                        }
                    }
                }
            }
            else // WORST_FIT, FIRST_FIT
            {
                // Backward order in m_Blocks - prefer blocks with largest amount of free space.
                for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
                {
                    VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
                    VMA_ASSERT(pCurrBlock);
                    VmaAllocationRequest currRequest = {};
                    if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
                        currentFrameIndex,
                        m_FrameInUseCount,
                        m_BufferImageGranularity,
                        size,
                        alignment,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
                        suballocType,
                        canMakeOtherLost,
                        strategy,
                        &currRequest))
                    {
                        const VkDeviceSize currRequestCost = currRequest.CalcCost();
                        // NOTE(review): the tail of this condition after `||`
                        // is missing (extraction gap).
                        if(pBestRequestBlock == VMA_NULL ||
                            currRequestCost < bestRequestCost ||
                        {
                            pBestRequestBlock = pCurrBlock;
                            bestRequest = currRequest;
                            bestRequestCost = currRequestCost;

                            // NOTE(review): condition tail missing here too.
                            if(bestRequestCost == 0 ||
                            {
                                break;
                            }
                        }
                    }
                }
            }

            if(pBestRequestBlock != VMA_NULL)
            {
                if(mapped)
                {
                    VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
                    if(res != VK_SUCCESS)
                    {
                        return res;
                    }
                }

                if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
                    currentFrameIndex,
                    m_FrameInUseCount,
                    &bestRequest))
                {
                    // We no longer have an empty Allocation.
                    if(pBestRequestBlock->m_pMetadata->IsEmpty())
                    {
                        m_HasEmptyBlock = false;
                    }
                    // Allocate from this pBlock.
                    *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
                    pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
                    (*pAllocation)->InitBlockAllocation(
                        hCurrentPool,
                        pBestRequestBlock,
                        bestRequest.offset,
                        alignment,
                        size,
                        suballocType,
                        mapped,
                        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
                    VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
                    VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
                    (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
                    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
                    {
                        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
                    }
                    if(IsCorruptionDetectionEnabled())
                    {
                        VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
                        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
                    }
                    return VK_SUCCESS;
                }
                // else: Some allocations must have been touched while we are here. Next try.
            }
            else
            {
                // Could not find place in any of the blocks - break outer loop.
                break;
            }
        }
        /* Maximum number of tries exceeded - a very unlike event when many other
        threads are simultaneously touching allocations making it impossible to make
        lost at the same time as we try to allocate. */
        if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
        {
            return VK_ERROR_TOO_MANY_OBJECTS;
        }
    }

    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10796 
    10797 void VmaBlockVector::Free(
    10798  VmaAllocation hAllocation)
    10799 {
    10800  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10801 
    10802  // Scope for lock.
    10803  {
    10804  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10805 
    10806  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10807 
    10808  if(IsCorruptionDetectionEnabled())
    10809  {
    10810  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10811  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10812  }
    10813 
    10814  if(hAllocation->IsPersistentMap())
    10815  {
    10816  pBlock->Unmap(m_hAllocator, 1);
    10817  }
    10818 
    10819  pBlock->m_pMetadata->Free(hAllocation);
    10820  VMA_HEAVY_ASSERT(pBlock->Validate());
    10821 
    10822  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10823 
    10824  // pBlock became empty after this deallocation.
    10825  if(pBlock->m_pMetadata->IsEmpty())
    10826  {
    10827  // Already has empty Allocation. We don't want to have two, so delete this one.
    10828  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10829  {
    10830  pBlockToDelete = pBlock;
    10831  Remove(pBlock);
    10832  }
    10833  // We now have first empty block.
    10834  else
    10835  {
    10836  m_HasEmptyBlock = true;
    10837  }
    10838  }
    10839  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10840  // (This is optional, heuristics.)
    10841  else if(m_HasEmptyBlock)
    10842  {
    10843  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10844  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10845  {
    10846  pBlockToDelete = pLastBlock;
    10847  m_Blocks.pop_back();
    10848  m_HasEmptyBlock = false;
    10849  }
    10850  }
    10851 
    10852  IncrementallySortBlocks();
    10853  }
    10854 
    10855  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10856  // lock, for performance reason.
    10857  if(pBlockToDelete != VMA_NULL)
    10858  {
    10859  VMA_DEBUG_LOG(" Deleted empty allocation");
    10860  pBlockToDelete->Destroy(m_hAllocator);
    10861  vma_delete(m_hAllocator, pBlockToDelete);
    10862  }
    10863 }
    10864 
    10865 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10866 {
    10867  VkDeviceSize result = 0;
    10868  for(size_t i = m_Blocks.size(); i--; )
    10869  {
    10870  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10871  if(result >= m_PreferredBlockSize)
    10872  {
    10873  break;
    10874  }
    10875  }
    10876  return result;
    10877 }
    10878 
    10879 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10880 {
    10881  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10882  {
    10883  if(m_Blocks[blockIndex] == pBlock)
    10884  {
    10885  VmaVectorRemove(m_Blocks, blockIndex);
    10886  return;
    10887  }
    10888  }
    10889  VMA_ASSERT(0);
    10890 }
    10891 
    10892 void VmaBlockVector::IncrementallySortBlocks()
    10893 {
    10894  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10895  {
    10896  // Bubble sort only until first swap.
    10897  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10898  {
    10899  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10900  {
    10901  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10902  return;
    10903  }
    10904  }
    10905  }
    10906 }
    10907 
    10908 VkResult VmaBlockVector::AllocateFromBlock(
    10909  VmaDeviceMemoryBlock* pBlock,
    10910  VmaPool hCurrentPool,
    10911  uint32_t currentFrameIndex,
    10912  VkDeviceSize size,
    10913  VkDeviceSize alignment,
    10914  VmaAllocationCreateFlags allocFlags,
    10915  void* pUserData,
    10916  VmaSuballocationType suballocType,
    10917  uint32_t strategy,
    10918  VmaAllocation* pAllocation)
    10919 {
    10920  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    10921  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10922  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10923  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10924 
    10925  VmaAllocationRequest currRequest = {};
    10926  if(pBlock->m_pMetadata->CreateAllocationRequest(
    10927  currentFrameIndex,
    10928  m_FrameInUseCount,
    10929  m_BufferImageGranularity,
    10930  size,
    10931  alignment,
    10932  isUpperAddress,
    10933  suballocType,
    10934  false, // canMakeOtherLost
    10935  strategy,
    10936  &currRequest))
    10937  {
    10938  // Allocate from pCurrBlock.
    10939  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    10940 
    10941  if(mapped)
    10942  {
    10943  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    10944  if(res != VK_SUCCESS)
    10945  {
    10946  return res;
    10947  }
    10948  }
    10949 
    10950  // We no longer have an empty Allocation.
    10951  if(pBlock->m_pMetadata->IsEmpty())
    10952  {
    10953  m_HasEmptyBlock = false;
    10954  }
    10955 
    10956  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10957  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
    10958  (*pAllocation)->InitBlockAllocation(
    10959  hCurrentPool,
    10960  pBlock,
    10961  currRequest.offset,
    10962  alignment,
    10963  size,
    10964  suballocType,
    10965  mapped,
    10966  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10967  VMA_HEAVY_ASSERT(pBlock->Validate());
    10968  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    10969  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10970  {
    10971  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10972  }
    10973  if(IsCorruptionDetectionEnabled())
    10974  {
    10975  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    10976  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10977  }
    10978  return VK_SUCCESS;
    10979  }
    10980  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10981 }
    10982 
    10983 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10984 {
    10985  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10986  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10987  allocInfo.allocationSize = blockSize;
    10988  VkDeviceMemory mem = VK_NULL_HANDLE;
    10989  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10990  if(res < 0)
    10991  {
    10992  return res;
    10993  }
    10994 
    10995  // New VkDeviceMemory successfully created.
    10996 
    10997  // Create new Allocation for it.
    10998  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    10999  pBlock->Init(
    11000  m_hAllocator,
    11001  m_MemoryTypeIndex,
    11002  mem,
    11003  allocInfo.allocationSize,
    11004  m_NextBlockId++,
    11005  m_Algorithm);
    11006 
    11007  m_Blocks.push_back(pBlock);
    11008  if(pNewBlockIndex != VMA_NULL)
    11009  {
    11010  *pNewBlockIndex = m_Blocks.size() - 1;
    11011  }
    11012 
    11013  return VK_SUCCESS;
    11014 }
    11015 
    11016 #if VMA_STATS_STRING_ENABLED
    11017 
    11018 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    11019 {
    11020  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11021 
    11022  json.BeginObject();
    11023 
    11024  if(m_IsCustomPool)
    11025  {
    11026  json.WriteString("MemoryTypeIndex");
    11027  json.WriteNumber(m_MemoryTypeIndex);
    11028 
    11029  json.WriteString("BlockSize");
    11030  json.WriteNumber(m_PreferredBlockSize);
    11031 
    11032  json.WriteString("BlockCount");
    11033  json.BeginObject(true);
    11034  if(m_MinBlockCount > 0)
    11035  {
    11036  json.WriteString("Min");
    11037  json.WriteNumber((uint64_t)m_MinBlockCount);
    11038  }
    11039  if(m_MaxBlockCount < SIZE_MAX)
    11040  {
    11041  json.WriteString("Max");
    11042  json.WriteNumber((uint64_t)m_MaxBlockCount);
    11043  }
    11044  json.WriteString("Cur");
    11045  json.WriteNumber((uint64_t)m_Blocks.size());
    11046  json.EndObject();
    11047 
    11048  if(m_FrameInUseCount > 0)
    11049  {
    11050  json.WriteString("FrameInUseCount");
    11051  json.WriteNumber(m_FrameInUseCount);
    11052  }
    11053 
    11054  if(m_Algorithm != 0)
    11055  {
    11056  json.WriteString("Algorithm");
    11057  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    11058  }
    11059  }
    11060  else
    11061  {
    11062  json.WriteString("PreferredBlockSize");
    11063  json.WriteNumber(m_PreferredBlockSize);
    11064  }
    11065 
    11066  json.WriteString("Blocks");
    11067  json.BeginObject();
    11068  for(size_t i = 0; i < m_Blocks.size(); ++i)
    11069  {
    11070  json.BeginString();
    11071  json.ContinueString(m_Blocks[i]->GetId());
    11072  json.EndString();
    11073 
    11074  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    11075  }
    11076  json.EndObject();
    11077 
    11078  json.EndObject();
    11079 }
    11080 
    11081 #endif // #if VMA_STATS_STRING_ENABLED
    11082 
    11083 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11084  VmaAllocator hAllocator,
    11085  uint32_t currentFrameIndex)
    11086 {
    11087  if(m_pDefragmentator == VMA_NULL)
    11088  {
    11089  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11090  hAllocator,
    11091  this,
    11092  currentFrameIndex);
    11093  }
    11094 
    11095  return m_pDefragmentator;
    11096 }
    11097 
    11098 VkResult VmaBlockVector::Defragment(
    11099  VmaDefragmentationStats* pDefragmentationStats,
    11100  VkDeviceSize& maxBytesToMove,
    11101  uint32_t& maxAllocationsToMove)
    11102 {
    11103  if(m_pDefragmentator == VMA_NULL)
    11104  {
    11105  return VK_SUCCESS;
    11106  }
    11107 
    11108  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11109 
    11110  // Defragment.
    11111  VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);
    11112 
    11113  // Accumulate statistics.
    11114  if(pDefragmentationStats != VMA_NULL)
    11115  {
    11116  const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
    11117  const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
    11118  pDefragmentationStats->bytesMoved += bytesMoved;
    11119  pDefragmentationStats->allocationsMoved += allocationsMoved;
    11120  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    11121  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    11122  maxBytesToMove -= bytesMoved;
    11123  maxAllocationsToMove -= allocationsMoved;
    11124  }
    11125 
    11126  // Free empty blocks.
    11127  m_HasEmptyBlock = false;
    11128  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11129  {
    11130  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    11131  if(pBlock->m_pMetadata->IsEmpty())
    11132  {
    11133  if(m_Blocks.size() > m_MinBlockCount)
    11134  {
    11135  if(pDefragmentationStats != VMA_NULL)
    11136  {
    11137  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    11138  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    11139  }
    11140 
    11141  VmaVectorRemove(m_Blocks, blockIndex);
    11142  pBlock->Destroy(m_hAllocator);
    11143  vma_delete(m_hAllocator, pBlock);
    11144  }
    11145  else
    11146  {
    11147  m_HasEmptyBlock = true;
    11148  }
    11149  }
    11150  }
    11151 
    11152  return result;
    11153 }
    11154 
    11155 void VmaBlockVector::DestroyDefragmentator()
    11156 {
    11157  if(m_pDefragmentator != VMA_NULL)
    11158  {
    11159  vma_delete(m_hAllocator, m_pDefragmentator);
    11160  m_pDefragmentator = VMA_NULL;
    11161  }
    11162 }
    11163 
    11164 void VmaBlockVector::MakePoolAllocationsLost(
    11165  uint32_t currentFrameIndex,
    11166  size_t* pLostAllocationCount)
    11167 {
    11168  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11169  size_t lostAllocationCount = 0;
    11170  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11171  {
    11172  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11173  VMA_ASSERT(pBlock);
    11174  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11175  }
    11176  if(pLostAllocationCount != VMA_NULL)
    11177  {
    11178  *pLostAllocationCount = lostAllocationCount;
    11179  }
    11180 }
    11181 
    11182 VkResult VmaBlockVector::CheckCorruption()
    11183 {
    11184  if(!IsCorruptionDetectionEnabled())
    11185  {
    11186  return VK_ERROR_FEATURE_NOT_PRESENT;
    11187  }
    11188 
    11189  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11190  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11191  {
    11192  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11193  VMA_ASSERT(pBlock);
    11194  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11195  if(res != VK_SUCCESS)
    11196  {
    11197  return res;
    11198  }
    11199  }
    11200  return VK_SUCCESS;
    11201 }
    11202 
    11203 void VmaBlockVector::AddStats(VmaStats* pStats)
    11204 {
    11205  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    11206  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    11207 
    11208  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11209 
    11210  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11211  {
    11212  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11213  VMA_ASSERT(pBlock);
    11214  VMA_HEAVY_ASSERT(pBlock->Validate());
    11215  VmaStatInfo allocationStatInfo;
    11216  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    11217  VmaAddStatInfo(pStats->total, allocationStatInfo);
    11218  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    11219  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    11220  }
    11221 }
    11222 
    11224 // VmaDefragmentator members definition
    11225 
// Constructs a defragmentator operating on a single block vector.
// hAllocator - owning allocator, used for callbacks and memory mapping.
// pBlockVector - the vector of device memory blocks to compact.
// currentFrameIndex - frame index used when checking allocations for lostness.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation is supported only for the default (generic) algorithm,
    // not for linear/buddy pools.
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11240 
    11241 VmaDefragmentator::~VmaDefragmentator()
    11242 {
    11243  for(size_t i = m_Blocks.size(); i--; )
    11244  {
    11245  vma_delete(m_hAllocator, m_Blocks[i]);
    11246  }
    11247 }
    11248 
    11249 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11250 {
    11251  AllocationInfo allocInfo;
    11252  allocInfo.m_hAllocation = hAlloc;
    11253  allocInfo.m_pChanged = pChanged;
    11254  m_Allocations.push_back(allocInfo);
    11255 }
    11256 
    11257 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11258 {
    11259  // It has already been mapped for defragmentation.
    11260  if(m_pMappedDataForDefragmentation)
    11261  {
    11262  *ppMappedData = m_pMappedDataForDefragmentation;
    11263  return VK_SUCCESS;
    11264  }
    11265 
    11266  // It is originally mapped.
    11267  if(m_pBlock->GetMappedData())
    11268  {
    11269  *ppMappedData = m_pBlock->GetMappedData();
    11270  return VK_SUCCESS;
    11271  }
    11272 
    11273  // Map on first usage.
    11274  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11275  *ppMappedData = m_pMappedDataForDefragmentation;
    11276  return res;
    11277 }
    11278 
    11279 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11280 {
    11281  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11282  {
    11283  m_pBlock->Unmap(hAllocator, 1);
    11284  }
    11285 }
    11286 
// Performs one round of defragmentation: walks candidate allocations from the
// most "source" block (end of m_Blocks) toward the most "destination" block
// (beginning), and for each tries to relocate it to an earlier block or a
// lower offset. Data is moved with memcpy through mapped pointers.
// Returns VK_SUCCESS when the whole pass completed, VK_INCOMPLETE when the
// maxBytesToMove/maxAllocationsToMove budget was exhausted, or an error from
// mapping memory.
VkResult VmaDefragmentator::DefragmentRound(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // srcAllocIndex == SIZE_MAX means "not chosen yet for this block"; the
    // unsigned comparison below then forces re-selection of the last element.
    size_t srcBlockIndex = m_Blocks.size() - 1;
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == 0)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        // Capture the allocation's placement parameters before searching for
        // a new home for it.
        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_INCOMPLETE;
                }

                // Both blocks must be CPU-visible to copy the data.
                void* pDstMappedData = VMA_NULL;
                VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                void* pSrcMappedData = VMA_NULL;
                res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
                if(res != VK_SUCCESS)
                {
                    return res;
                }

                // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
                memcpy(
                    reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
                    reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
                    static_cast<size_t>(size));

                // Re-create the magic-value margins around the new location so
                // corruption detection keeps working after the move.
                if(VMA_DEBUG_MARGIN > 0)
                {
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
                    VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
                }

                // Update metadata: register at destination, free at source,
                // and repoint the allocation handle to its new block/offset.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        // Advance to the next candidate: previous allocation in this block,
        // or the last allocation of the previous block.
        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
    11430 
// Entry point of the defragmentation algorithm for one block vector.
// Builds per-block bookkeeping, distributes the registered allocations to
// their blocks, sorts blocks from most "destination" to most "source", then
// runs DefragmentRound() up to two times within the given byte/count budget.
// Returns the result of the last round (VK_SUCCESS, VK_INCOMPLETE, or error).
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    // Nothing was registered via AddAllocation() - nothing to do.
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    // This enables the binary search below when matching allocations to blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of the
                // vector's blocks - anything else is a logic error.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    // Precompute per-block properties used by the sort criterion below.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11498 
    11499 bool VmaDefragmentator::MoveMakesSense(
    11500  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11501  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11502 {
    11503  if(dstBlockIndex < srcBlockIndex)
    11504  {
    11505  return true;
    11506  }
    11507  if(dstBlockIndex > srcBlockIndex)
    11508  {
    11509  return false;
    11510  }
    11511  if(dstOffset < srcOffset)
    11512  {
    11513  return true;
    11514  }
    11515  return false;
    11516 }
    11517 
    11519 // VmaRecorder
    11520 
    11521 #if VMA_RECORDING_ENABLED
    11522 
// Constructs an inactive recorder. Recording actually starts in Init(),
// which opens the file and captures the performance-counter baseline.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    // Sentinel values; overwritten by Init() with real QPC frequency/start.
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
    11531 
// Starts recording: captures the time baseline, opens the output file and
// writes the format header. Returns VK_ERROR_INITIALIZATION_FAILED if the
// file cannot be opened. Windows-only (QueryPerformance*, fopen_s).
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Baseline for the relative timestamps written with every recorded call.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header.
    // Second line is the file format version, consumed by the replay tool.
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,4");

    return VK_SUCCESS;
}
    11553 
    11554 VmaRecorder::~VmaRecorder()
    11555 {
    11556  if(m_File != VMA_NULL)
    11557  {
    11558  fclose(m_File);
    11559  }
    11560 }
    11561 
    11562 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    11563 {
    11564  CallParams callParams;
    11565  GetBasicParams(callParams);
    11566 
    11567  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11568  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11569  Flush();
    11570 }
    11571 
    11572 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    11573 {
    11574  CallParams callParams;
    11575  GetBasicParams(callParams);
    11576 
    11577  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11578  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    11579  Flush();
    11580 }
    11581 
// Writes a vmaCreatePool entry: all pool creation parameters followed by the
// resulting pool handle, so the replay tool can recreate the pool.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // Argument order must match the format string exactly - it defines the
    // columns of the recording file format.
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11598 
    11599 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    11600 {
    11601  CallParams callParams;
    11602  GetBasicParams(callParams);
    11603 
    11604  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11605  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    11606  pool);
    11607  Flush();
    11608 }
    11609 
// Writes a vmaAllocateMemory entry: the Vulkan memory requirements and the
// VMA allocation parameters, followed by the resulting allocation handle.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    // pUserData is rendered as a string or pointer depending on createInfo.flags.
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    // Argument order must match the format string - it defines the file format.
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11634 
// Writes a vmaAllocateMemoryForBuffer entry. In addition to the requirements
// and creation parameters it records whether a dedicated allocation is
// required/preferred, as reported by the dedicated-allocation extension.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    // Argument order must match the format string - it defines the file format.
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11663 
// Writes a vmaAllocateMemoryForImage entry. Mirrors
// RecordAllocateMemoryForBuffer, only the function name column differs.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    // Argument order must match the format string - it defines the file format.
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11692 
    11693 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    11694  VmaAllocation allocation)
    11695 {
    11696  CallParams callParams;
    11697  GetBasicParams(callParams);
    11698 
    11699  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11700  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11701  allocation);
    11702  Flush();
    11703 }
    11704 
    11705 void VmaRecorder::RecordResizeAllocation(
    11706  uint32_t frameIndex,
    11707  VmaAllocation allocation,
    11708  VkDeviceSize newSize)
    11709 {
    11710  CallParams callParams;
    11711  GetBasicParams(callParams);
    11712 
    11713  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11714  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11715  allocation, newSize);
    11716  Flush();
    11717 }
    11718 
    11719 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    11720  VmaAllocation allocation,
    11721  const void* pUserData)
    11722 {
    11723  CallParams callParams;
    11724  GetBasicParams(callParams);
    11725 
    11726  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11727  UserDataString userDataStr(
    11728  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    11729  pUserData);
    11730  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    11731  allocation,
    11732  userDataStr.GetString());
    11733  Flush();
    11734 }
    11735 
    11736 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    11737  VmaAllocation allocation)
    11738 {
    11739  CallParams callParams;
    11740  GetBasicParams(callParams);
    11741 
    11742  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11743  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11744  allocation);
    11745  Flush();
    11746 }
    11747 
    11748 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    11749  VmaAllocation allocation)
    11750 {
    11751  CallParams callParams;
    11752  GetBasicParams(callParams);
    11753 
    11754  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11755  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11756  allocation);
    11757  Flush();
    11758 }
    11759 
    11760 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    11761  VmaAllocation allocation)
    11762 {
    11763  CallParams callParams;
    11764  GetBasicParams(callParams);
    11765 
    11766  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11767  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    11768  allocation);
    11769  Flush();
    11770 }
    11771 
    11772 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    11773  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11774 {
    11775  CallParams callParams;
    11776  GetBasicParams(callParams);
    11777 
    11778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11779  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11780  allocation,
    11781  offset,
    11782  size);
    11783  Flush();
    11784 }
    11785 
    11786 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    11787  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    11788 {
    11789  CallParams callParams;
    11790  GetBasicParams(callParams);
    11791 
    11792  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11793  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    11794  allocation,
    11795  offset,
    11796  size);
    11797  Flush();
    11798 }
    11799 
// Writes a vmaCreateBuffer entry: the buffer creation parameters and the VMA
// allocation parameters, followed by the resulting allocation handle.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    // Argument order must match the format string - it defines the file format.
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11825 
// Writes a vmaCreateImage entry: the full image creation parameters and the
// VMA allocation parameters, followed by the resulting allocation handle.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    // Argument order must match the format string - it defines the file format.
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11860 
    11861 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    11862  VmaAllocation allocation)
    11863 {
    11864  CallParams callParams;
    11865  GetBasicParams(callParams);
    11866 
    11867  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11868  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    11869  allocation);
    11870  Flush();
    11871 }
    11872 
    11873 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    11874  VmaAllocation allocation)
    11875 {
    11876  CallParams callParams;
    11877  GetBasicParams(callParams);
    11878 
    11879  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11880  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    11881  allocation);
    11882  Flush();
    11883 }
    11884 
    11885 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    11886  VmaAllocation allocation)
    11887 {
    11888  CallParams callParams;
    11889  GetBasicParams(callParams);
    11890 
    11891  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11892  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    11893  allocation);
    11894  Flush();
    11895 }
    11896 
    11897 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    11898  VmaAllocation allocation)
    11899 {
    11900  CallParams callParams;
    11901  GetBasicParams(callParams);
    11902 
    11903  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11904  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    11905  allocation);
    11906  Flush();
    11907 }
    11908 
    11909 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    11910  VmaPool pool)
    11911 {
    11912  CallParams callParams;
    11913  GetBasicParams(callParams);
    11914 
    11915  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    11916  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    11917  pool);
    11918  Flush();
    11919 }
    11920 
    11921 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    11922 {
    11923  if(pUserData != VMA_NULL)
    11924  {
    11925  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    11926  {
    11927  m_Str = (const char*)pUserData;
    11928  }
    11929  else
    11930  {
    11931  sprintf_s(m_PtrStr, "%p", pUserData);
    11932  m_Str = m_PtrStr;
    11933  }
    11934  }
    11935  else
    11936  {
    11937  m_Str = "";
    11938  }
    11939 }
    11940 
// Writes the "Config" section of the recording file: physical device
// properties and limits, the memory heap/type layout, enabled extensions and
// the compile-time VMA_* macro values. The replay tool uses this section to
// detect environment differences between recording and replay.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that influence allocation placement decisions.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Full memory heap and type topology.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of the library at recording time.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11986 
    11987 void VmaRecorder::GetBasicParams(CallParams& outParams)
    11988 {
    11989  outParams.threadId = GetCurrentThreadId();
    11990 
    11991  LARGE_INTEGER counter;
    11992  QueryPerformanceCounter(&counter);
    11993  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    11994 }
    11995 
    11996 void VmaRecorder::Flush()
    11997 {
    11998  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    11999  {
    12000  fflush(m_File);
    12001  }
    12002 }
    12003 
    12004 #endif // #if VMA_RECORDING_ENABLED
    12005 
    12007 // VmaAllocator_T
    12008 
    12009 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12010  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12011  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12012  m_hDevice(pCreateInfo->device),
    12013  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12014  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12015  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12016  m_PreferredLargeHeapBlockSize(0),
    12017  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12018  m_CurrentFrameIndex(0),
    12019  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12020  m_NextPoolId(0)
    12022  ,m_pRecorder(VMA_NULL)
    12023 #endif
    12024 {
    12025  if(VMA_DEBUG_DETECT_CORRUPTION)
    12026  {
    12027  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12028  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12029  }
    12030 
    12031  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12032 
    12033 #if !(VMA_DEDICATED_ALLOCATION)
    12035  {
    12036  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12037  }
    12038 #endif
    12039 
    12040  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12041  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12042  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12043 
    12044  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12045  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12046 
    12047  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12048  {
    12049  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12050  }
    12051 
    12052  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12053  {
    12054  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12055  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12056  }
    12057 
    12058  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12059 
    12060  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12061  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12062 
    12063  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12064  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12065  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12066  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12067 
    12068  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12069  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12070 
    12071  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12072  {
    12073  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12074  {
    12075  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12076  if(limit != VK_WHOLE_SIZE)
    12077  {
    12078  m_HeapSizeLimit[heapIndex] = limit;
    12079  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12080  {
    12081  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12082  }
    12083  }
    12084  }
    12085  }
    12086 
    12087  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12088  {
    12089  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12090 
    12091  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12092  this,
    12093  memTypeIndex,
    12094  preferredBlockSize,
    12095  0,
    12096  SIZE_MAX,
    12097  GetBufferImageGranularity(),
    12098  pCreateInfo->frameInUseCount,
    12099  false, // isCustomPool
    12100  false, // explicitBlockSize
    12101  false); // linearAlgorithm
    12102  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12103  // becase minBlockCount is 0.
    12104  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12105 
    12106  }
    12107 }
    12108 
    12109 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    12110 {
    12111  VkResult res = VK_SUCCESS;
    12112 
    12113  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    12114  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    12115  {
    12116 #if VMA_RECORDING_ENABLED
    12117  m_pRecorder = vma_new(this, VmaRecorder)();
    12118  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    12119  if(res != VK_SUCCESS)
    12120  {
    12121  return res;
    12122  }
    12123  m_pRecorder->WriteConfiguration(
    12124  m_PhysicalDeviceProperties,
    12125  m_MemProps,
    12126  m_UseKhrDedicatedAllocation);
    12127  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    12128 #else
    12129  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    12130  return VK_ERROR_FEATURE_NOT_PRESENT;
    12131 #endif
    12132  }
    12133 
    12134  return res;
    12135 }
    12136 
    12137 VmaAllocator_T::~VmaAllocator_T()
    12138 {
    12139 #if VMA_RECORDING_ENABLED
    12140  if(m_pRecorder != VMA_NULL)
    12141  {
    12142  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    12143  vma_delete(this, m_pRecorder);
    12144  }
    12145 #endif
    12146 
    12147  VMA_ASSERT(m_Pools.empty());
    12148 
    12149  for(size_t i = GetMemoryTypeCount(); i--; )
    12150  {
    12151  vma_delete(this, m_pDedicatedAllocations[i]);
    12152  vma_delete(this, m_pBlockVectors[i]);
    12153  }
    12154 }
    12155 
/*
Fills m_VulkanFunctions with the Vulkan entry points the allocator calls.
Sources, in order of increasing priority:
1. Statically linked functions (only when VMA_STATIC_VULKAN_FUNCTIONS == 1).
2. Pointers supplied by the user via pVulkanFunctions (may be null);
   each non-null member overrides the static one.
Ends with asserts that every required pointer has been resolved.
*/
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // KHR extension entry points are not exported statically; they must be
    // fetched from the device at runtime.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one member from the user-provided struct, but only if it is set,
// so user pointers override static ones without erasing them with nulls.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12241 
    12242 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12243 {
    12244  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12245  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12246  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12247  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12248 }
    12249 
    12250 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12251  VkDeviceSize size,
    12252  VkDeviceSize alignment,
    12253  bool dedicatedAllocation,
    12254  VkBuffer dedicatedBuffer,
    12255  VkImage dedicatedImage,
    12256  const VmaAllocationCreateInfo& createInfo,
    12257  uint32_t memTypeIndex,
    12258  VmaSuballocationType suballocType,
    12259  VmaAllocation* pAllocation)
    12260 {
    12261  VMA_ASSERT(pAllocation != VMA_NULL);
    12262  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12263 
    12264  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12265 
    12266  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12267  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12268  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12269  {
    12270  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12271  }
    12272 
    12273  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12274  VMA_ASSERT(blockVector);
    12275 
    12276  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12277  bool preferDedicatedMemory =
    12278  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12279  dedicatedAllocation ||
    12280  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12281  size > preferredBlockSize / 2;
    12282 
    12283  if(preferDedicatedMemory &&
    12284  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12285  finalCreateInfo.pool == VK_NULL_HANDLE)
    12286  {
    12288  }
    12289 
    12290  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12291  {
    12292  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12293  {
    12294  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12295  }
    12296  else
    12297  {
    12298  return AllocateDedicatedMemory(
    12299  size,
    12300  suballocType,
    12301  memTypeIndex,
    12302  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12303  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12304  finalCreateInfo.pUserData,
    12305  dedicatedBuffer,
    12306  dedicatedImage,
    12307  pAllocation);
    12308  }
    12309  }
    12310  else
    12311  {
    12312  VkResult res = blockVector->Allocate(
    12313  VK_NULL_HANDLE, // hCurrentPool
    12314  m_CurrentFrameIndex.load(),
    12315  size,
    12316  alignment,
    12317  finalCreateInfo,
    12318  suballocType,
    12319  pAllocation);
    12320  if(res == VK_SUCCESS)
    12321  {
    12322  return res;
    12323  }
    12324 
    12325  // 5. Try dedicated memory.
    12326  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12327  {
    12328  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12329  }
    12330  else
    12331  {
    12332  res = AllocateDedicatedMemory(
    12333  size,
    12334  suballocType,
    12335  memTypeIndex,
    12336  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12337  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12338  finalCreateInfo.pUserData,
    12339  dedicatedBuffer,
    12340  dedicatedImage,
    12341  pAllocation);
    12342  if(res == VK_SUCCESS)
    12343  {
    12344  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12345  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12346  return VK_SUCCESS;
    12347  }
    12348  else
    12349  {
    12350  // Everything failed: Return error code.
    12351  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12352  return res;
    12353  }
    12354  }
    12355  }
    12356 }
    12357 
/*
Allocates a whole VkDeviceMemory object for a single allocation (no
sub-allocation). Optionally chains VkMemoryDedicatedAllocateInfoKHR when the
KHR dedicated-allocation extension is in use and a buffer or image handle was
provided. On success, registers the allocation in m_pDedicatedAllocations and
returns VK_SUCCESS; on failure, returns the Vulkan error and leaves no
resources behind.
*/
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Tell the driver this memory is dedicated to one resource, which may
    // enable driver-side optimizations. Buffer and image are mutually
    // exclusive.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Map immediately when requested (persistently mapped allocation).
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the memory just allocated before
            // propagating the error, so nothing leaks.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12440 
    12441 void VmaAllocator_T::GetBufferMemoryRequirements(
    12442  VkBuffer hBuffer,
    12443  VkMemoryRequirements& memReq,
    12444  bool& requiresDedicatedAllocation,
    12445  bool& prefersDedicatedAllocation) const
    12446 {
    12447 #if VMA_DEDICATED_ALLOCATION
    12448  if(m_UseKhrDedicatedAllocation)
    12449  {
    12450  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12451  memReqInfo.buffer = hBuffer;
    12452 
    12453  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12454 
    12455  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12456  memReq2.pNext = &memDedicatedReq;
    12457 
    12458  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12459 
    12460  memReq = memReq2.memoryRequirements;
    12461  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12462  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12463  }
    12464  else
    12465 #endif // #if VMA_DEDICATED_ALLOCATION
    12466  {
    12467  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12468  requiresDedicatedAllocation = false;
    12469  prefersDedicatedAllocation = false;
    12470  }
    12471 }
    12472 
    12473 void VmaAllocator_T::GetImageMemoryRequirements(
    12474  VkImage hImage,
    12475  VkMemoryRequirements& memReq,
    12476  bool& requiresDedicatedAllocation,
    12477  bool& prefersDedicatedAllocation) const
    12478 {
    12479 #if VMA_DEDICATED_ALLOCATION
    12480  if(m_UseKhrDedicatedAllocation)
    12481  {
    12482  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12483  memReqInfo.image = hImage;
    12484 
    12485  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12486 
    12487  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12488  memReq2.pNext = &memDedicatedReq;
    12489 
    12490  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12491 
    12492  memReq = memReq2.memoryRequirements;
    12493  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12494  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12495  }
    12496  else
    12497 #endif // #if VMA_DEDICATED_ALLOCATION
    12498  {
    12499  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12500  requiresDedicatedAllocation = false;
    12501  prefersDedicatedAllocation = false;
    12502  }
    12503 }
    12504 
    12505 VkResult VmaAllocator_T::AllocateMemory(
    12506  const VkMemoryRequirements& vkMemReq,
    12507  bool requiresDedicatedAllocation,
    12508  bool prefersDedicatedAllocation,
    12509  VkBuffer dedicatedBuffer,
    12510  VkImage dedicatedImage,
    12511  const VmaAllocationCreateInfo& createInfo,
    12512  VmaSuballocationType suballocType,
    12513  VmaAllocation* pAllocation)
    12514 {
    12515  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12516 
    12517  if(vkMemReq.size == 0)
    12518  {
    12519  return VK_ERROR_VALIDATION_FAILED_EXT;
    12520  }
    12521  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12522  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12523  {
    12524  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12525  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12526  }
    12527  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12529  {
    12530  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12531  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12532  }
    12533  if(requiresDedicatedAllocation)
    12534  {
    12535  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12536  {
    12537  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12538  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12539  }
    12540  if(createInfo.pool != VK_NULL_HANDLE)
    12541  {
    12542  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12543  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12544  }
    12545  }
    12546  if((createInfo.pool != VK_NULL_HANDLE) &&
    12547  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12548  {
    12549  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12550  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12551  }
    12552 
    12553  if(createInfo.pool != VK_NULL_HANDLE)
    12554  {
    12555  const VkDeviceSize alignmentForPool = VMA_MAX(
    12556  vkMemReq.alignment,
    12557  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12558  return createInfo.pool->m_BlockVector.Allocate(
    12559  createInfo.pool,
    12560  m_CurrentFrameIndex.load(),
    12561  vkMemReq.size,
    12562  alignmentForPool,
    12563  createInfo,
    12564  suballocType,
    12565  pAllocation);
    12566  }
    12567  else
    12568  {
    12569  // Bit mask of memory Vulkan types acceptable for this allocation.
    12570  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12571  uint32_t memTypeIndex = UINT32_MAX;
    12572  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12573  if(res == VK_SUCCESS)
    12574  {
    12575  VkDeviceSize alignmentForMemType = VMA_MAX(
    12576  vkMemReq.alignment,
    12577  GetMemoryTypeMinAlignment(memTypeIndex));
    12578 
    12579  res = AllocateMemoryOfType(
    12580  vkMemReq.size,
    12581  alignmentForMemType,
    12582  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12583  dedicatedBuffer,
    12584  dedicatedImage,
    12585  createInfo,
    12586  memTypeIndex,
    12587  suballocType,
    12588  pAllocation);
    12589  // Succeeded on first try.
    12590  if(res == VK_SUCCESS)
    12591  {
    12592  return res;
    12593  }
    12594  // Allocation from this memory type failed. Try other compatible memory types.
    12595  else
    12596  {
    12597  for(;;)
    12598  {
    12599  // Remove old memTypeIndex from list of possibilities.
    12600  memoryTypeBits &= ~(1u << memTypeIndex);
    12601  // Find alternative memTypeIndex.
    12602  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12603  if(res == VK_SUCCESS)
    12604  {
    12605  alignmentForMemType = VMA_MAX(
    12606  vkMemReq.alignment,
    12607  GetMemoryTypeMinAlignment(memTypeIndex));
    12608 
    12609  res = AllocateMemoryOfType(
    12610  vkMemReq.size,
    12611  alignmentForMemType,
    12612  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12613  dedicatedBuffer,
    12614  dedicatedImage,
    12615  createInfo,
    12616  memTypeIndex,
    12617  suballocType,
    12618  pAllocation);
    12619  // Allocation from this alternative memory type succeeded.
    12620  if(res == VK_SUCCESS)
    12621  {
    12622  return res;
    12623  }
    12624  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12625  }
    12626  // No other matching memory type index could be found.
    12627  else
    12628  {
    12629  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12630  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12631  }
    12632  }
    12633  }
    12634  }
    12635  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12636  else
    12637  return res;
    12638  }
    12639 }
    12640 
/*
Frees an allocation previously created by this allocator. TouchAllocation
returns false when the allocation has already become lost; in that case only
the bookkeeping object is destroyed — the underlying memory is gone. The
metadata object itself is always destroyed at the end.
*/
void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
{
    VMA_ASSERT(allocation);

    if(TouchAllocation(allocation))
    {
        // Optionally overwrite memory with a "destroyed" pattern to catch
        // use-after-free in debug builds.
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
        }

        switch(allocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
            {
                // Return the suballocation to whichever block vector owns it:
                // the custom pool's, or the default one for its memory type.
                VmaBlockVector* pBlockVector = VMA_NULL;
                VmaPool hPool = allocation->GetPool();
                if(hPool != VK_NULL_HANDLE)
                {
                    pBlockVector = &hPool->m_BlockVector;
                }
                else
                {
                    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
                    pBlockVector = m_pBlockVectors[memTypeIndex];
                }
                pBlockVector->Free(allocation);
            }
            break;
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            FreeDedicatedMemory(allocation);
            break;
        default:
            VMA_ASSERT(0);
        }
    }

    // Destroy the bookkeeping object regardless of whether memory was freed.
    allocation->SetUserData(this, VMA_NULL);
    vma_delete(this, allocation);
}
    12681 
    12682 VkResult VmaAllocator_T::ResizeAllocation(
    12683  const VmaAllocation alloc,
    12684  VkDeviceSize newSize)
    12685 {
    12686  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12687  {
    12688  return VK_ERROR_VALIDATION_FAILED_EXT;
    12689  }
    12690  if(newSize == alloc->GetSize())
    12691  {
    12692  return VK_SUCCESS;
    12693  }
    12694 
    12695  switch(alloc->GetType())
    12696  {
    12697  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12698  return VK_ERROR_FEATURE_NOT_PRESENT;
    12699  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12700  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12701  {
    12702  alloc->ChangeSize(newSize);
    12703  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12704  return VK_SUCCESS;
    12705  }
    12706  else
    12707  {
    12708  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12709  }
    12710  default:
    12711  VMA_ASSERT(0);
    12712  return VK_ERROR_VALIDATION_FAILED_EXT;
    12713  }
    12714 }
    12715 
    12716 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12717 {
    12718  // Initialize.
    12719  InitStatInfo(pStats->total);
    12720  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12721  InitStatInfo(pStats->memoryType[i]);
    12722  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12723  InitStatInfo(pStats->memoryHeap[i]);
    12724 
    12725  // Process default pools.
    12726  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12727  {
    12728  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12729  VMA_ASSERT(pBlockVector);
    12730  pBlockVector->AddStats(pStats);
    12731  }
    12732 
    12733  // Process custom pools.
    12734  {
    12735  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12736  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12737  {
    12738  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12739  }
    12740  }
    12741 
    12742  // Process dedicated allocations.
    12743  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12744  {
    12745  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12746  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12747  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12748  VMA_ASSERT(pDedicatedAllocVector);
    12749  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12750  {
    12751  VmaStatInfo allocationStatInfo;
    12752  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12753  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12754  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12755  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12756  }
    12757  }
    12758 
    12759  // Postprocess.
    12760  VmaPostprocessCalcStatInfo(pStats->total);
    12761  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12762  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12763  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12764  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12765 }
    12766 
// AMD's PCI vendor ID (4098 == 0x1002); presumably used to special-case
// behavior on AMD hardware — usage site not visible here, TODO confirm.
static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12768 
    12769 VkResult VmaAllocator_T::Defragment(
    12770  VmaAllocation* pAllocations,
    12771  size_t allocationCount,
    12772  VkBool32* pAllocationsChanged,
    12773  const VmaDefragmentationInfo* pDefragmentationInfo,
    12774  VmaDefragmentationStats* pDefragmentationStats)
    12775 {
    12776  if(pAllocationsChanged != VMA_NULL)
    12777  {
    12778  memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    12779  }
    12780  if(pDefragmentationStats != VMA_NULL)
    12781  {
    12782  memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    12783  }
    12784 
    12785  const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();
    12786 
    12787  VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);
    12788 
    12789  const size_t poolCount = m_Pools.size();
    12790 
    12791  // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    12792  for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    12793  {
    12794  VmaAllocation hAlloc = pAllocations[allocIndex];
    12795  VMA_ASSERT(hAlloc);
    12796  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    12797  // DedicatedAlloc cannot be defragmented.
    12798  const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    12799  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    12800  // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
    12801  ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
    12802  // Lost allocation cannot be defragmented.
    12803  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    12804  {
    12805  VmaBlockVector* pAllocBlockVector = VMA_NULL;
    12806 
    12807  const VmaPool hAllocPool = hAlloc->GetPool();
    12808  // This allocation belongs to custom pool.
    12809  if(hAllocPool != VK_NULL_HANDLE)
    12810  {
    12811  // Pools with linear or buddy algorithm are not defragmented.
    12812  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    12813  {
    12814  pAllocBlockVector = &hAllocPool->m_BlockVector;
    12815  }
    12816  }
    12817  // This allocation belongs to general pool.
    12818  else
    12819  {
    12820  pAllocBlockVector = m_pBlockVectors[memTypeIndex];
    12821  }
    12822 
    12823  if(pAllocBlockVector != VMA_NULL)
    12824  {
    12825  VmaDefragmentator* const pDefragmentator =
    12826  pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
    12827  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    12828  &pAllocationsChanged[allocIndex] : VMA_NULL;
    12829  pDefragmentator->AddAllocation(hAlloc, pChanged);
    12830  }
    12831  }
    12832  }
    12833 
    12834  VkResult result = VK_SUCCESS;
    12835 
    12836  // ======== Main processing.
    12837 
    12838  VkDeviceSize maxBytesToMove = SIZE_MAX;
    12839  uint32_t maxAllocationsToMove = UINT32_MAX;
    12840  if(pDefragmentationInfo != VMA_NULL)
    12841  {
    12842  maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
    12843  maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    12844  }
    12845 
    12846  // Process standard memory.
    12847  for(uint32_t memTypeIndex = 0;
    12848  (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
    12849  ++memTypeIndex)
    12850  {
    12851  // Only HOST_VISIBLE memory types can be defragmented.
    12852  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12853  {
    12854  result = m_pBlockVectors[memTypeIndex]->Defragment(
    12855  pDefragmentationStats,
    12856  maxBytesToMove,
    12857  maxAllocationsToMove);
    12858  }
    12859  }
    12860 
    12861  // Process custom pools.
    12862  for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    12863  {
    12864  result = m_Pools[poolIndex]->m_BlockVector.Defragment(
    12865  pDefragmentationStats,
    12866  maxBytesToMove,
    12867  maxAllocationsToMove);
    12868  }
    12869 
    12870  // ======== Destroy defragmentators.
    12871 
    12872  // Process custom pools.
    12873  for(size_t poolIndex = poolCount; poolIndex--; )
    12874  {
    12875  m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    12876  }
    12877 
    12878  // Process standard memory.
    12879  for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    12880  {
    12881  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    12882  {
    12883  m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
    12884  }
    12885  }
    12886 
    12887  return result;
    12888 }
    12889 
// Fills *pAllocationInfo with the current parameters of the allocation.
// For allocations that can become lost, this also "touches" the allocation:
// its last-use frame index is atomically advanced to the current frame, or the
// allocation is reported as lost if it already is.
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Compare-exchange loop: either observe the allocation as lost, or
        // bring its last-use frame index up to the current frame.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation is lost: report invalid/empty parameters.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Allocation is live and already touched in this frame.
                // pMappedData is reported as VMA_NULL: lost-able allocations
                // are not mappable (see Map(), which rejects CanBecomeLost()).
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the frame index; on failure the CAS reloads
                // the observed value and the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // When stats strings are enabled, keep the last-use frame index of
        // non-lost-able allocations current as well (same CAS loop, but the
        // allocation can never be lost here).
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12961 
// "Touches" the allocation: marks it as used in the current frame.
// Returns false if the allocation is lost, true otherwise.
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        // Compare-exchange loop: either observe the allocation as lost, or
        // atomically advance its last-use frame index to the current frame.
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                return true;
            }
            else // Last use time earlier than current time.
            {
                // On CAS failure the observed value is reloaded and the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Keep the last-use frame index current for statistics; this
        // allocation can never be lost, hence the assert.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        return true;
    }
}
    13013 
    13014 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13015 {
    13016  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13017 
    13018  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13019 
    13020  if(newCreateInfo.maxBlockCount == 0)
    13021  {
    13022  newCreateInfo.maxBlockCount = SIZE_MAX;
    13023  }
    13024  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13025  {
    13026  return VK_ERROR_INITIALIZATION_FAILED;
    13027  }
    13028 
    13029  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13030 
    13031  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13032 
    13033  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13034  if(res != VK_SUCCESS)
    13035  {
    13036  vma_delete(this, *pPool);
    13037  *pPool = VMA_NULL;
    13038  return res;
    13039  }
    13040 
    13041  // Add to m_Pools.
    13042  {
    13043  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13044  (*pPool)->SetId(m_NextPoolId++);
    13045  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13046  }
    13047 
    13048  return VK_SUCCESS;
    13049 }
    13050 
    13051 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13052 {
    13053  // Remove from m_Pools.
    13054  {
    13055  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13056  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13057  VMA_ASSERT(success && "Pool not found in Allocator.");
    13058  }
    13059 
    13060  vma_delete(this, pool);
    13061 }
    13062 
// Retrieves statistics of the given custom pool by delegating to its block vector.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13067 
// Atomically publishes the new frame index; read by the lost-allocation
// touch logic (see GetAllocationInfo / TouchAllocation).
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    13072 
// Marks eligible allocations of the given pool as lost, judged against the
// current frame index. Optional pLostAllocationCount receives the number of
// allocations that were made lost.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    13081 
// Runs corruption detection on the given custom pool's block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    13086 
    13087 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13088 {
    13089  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13090 
    13091  // Process default pools.
    13092  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13093  {
    13094  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13095  {
    13096  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13097  VMA_ASSERT(pBlockVector);
    13098  VkResult localRes = pBlockVector->CheckCorruption();
    13099  switch(localRes)
    13100  {
    13101  case VK_ERROR_FEATURE_NOT_PRESENT:
    13102  break;
    13103  case VK_SUCCESS:
    13104  finalRes = VK_SUCCESS;
    13105  break;
    13106  default:
    13107  return localRes;
    13108  }
    13109  }
    13110  }
    13111 
    13112  // Process custom pools.
    13113  {
    13114  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13115  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13116  {
    13117  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13118  {
    13119  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13120  switch(localRes)
    13121  {
    13122  case VK_ERROR_FEATURE_NOT_PRESENT:
    13123  break;
    13124  case VK_SUCCESS:
    13125  finalRes = VK_SUCCESS;
    13126  break;
    13127  default:
    13128  return localRes;
    13129  }
    13130  }
    13131  }
    13132  }
    13133 
    13134  return finalRes;
    13135 }
    13136 
// Creates a dummy allocation object that is already in the "lost" state.
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    13142 
    13143 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    13144 {
    13145  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    13146 
    13147  VkResult res;
    13148  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13149  {
    13150  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13151  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    13152  {
    13153  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13154  if(res == VK_SUCCESS)
    13155  {
    13156  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    13157  }
    13158  }
    13159  else
    13160  {
    13161  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    13162  }
    13163  }
    13164  else
    13165  {
    13166  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13167  }
    13168 
    13169  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    13170  {
    13171  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    13172  }
    13173 
    13174  return res;
    13175 }
    13176 
    13177 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    13178 {
    13179  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    13180  {
    13181  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    13182  }
    13183 
    13184  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    13185 
    13186  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    13187  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13188  {
    13189  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13190  m_HeapSizeLimit[heapIndex] += size;
    13191  }
    13192 }
    13193 
    13194 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    13195 {
    13196  if(hAllocation->CanBecomeLost())
    13197  {
    13198  return VK_ERROR_MEMORY_MAP_FAILED;
    13199  }
    13200 
    13201  switch(hAllocation->GetType())
    13202  {
    13203  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13204  {
    13205  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13206  char *pBytes = VMA_NULL;
    13207  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    13208  if(res == VK_SUCCESS)
    13209  {
    13210  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    13211  hAllocation->BlockAllocMap();
    13212  }
    13213  return res;
    13214  }
    13215  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13216  return hAllocation->DedicatedAllocMap(this, ppData);
    13217  default:
    13218  VMA_ASSERT(0);
    13219  return VK_ERROR_MEMORY_MAP_FAILED;
    13220  }
    13221 }
    13222 
    13223 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13224 {
    13225  switch(hAllocation->GetType())
    13226  {
    13227  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13228  {
    13229  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13230  hAllocation->BlockAllocUnmap();
    13231  pBlock->Unmap(this, 1);
    13232  }
    13233  break;
    13234  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13235  hAllocation->DedicatedAllocUnmap(this);
    13236  break;
    13237  default:
    13238  VMA_ASSERT(0);
    13239  }
    13240 }
    13241 
    13242 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13243 {
    13244  VkResult res = VK_SUCCESS;
    13245  switch(hAllocation->GetType())
    13246  {
    13247  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13248  res = GetVulkanFunctions().vkBindBufferMemory(
    13249  m_hDevice,
    13250  hBuffer,
    13251  hAllocation->GetMemory(),
    13252  0); //memoryOffset
    13253  break;
    13254  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13255  {
    13256  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13257  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13258  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13259  break;
    13260  }
    13261  default:
    13262  VMA_ASSERT(0);
    13263  }
    13264  return res;
    13265 }
    13266 
    13267 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13268 {
    13269  VkResult res = VK_SUCCESS;
    13270  switch(hAllocation->GetType())
    13271  {
    13272  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13273  res = GetVulkanFunctions().vkBindImageMemory(
    13274  m_hDevice,
    13275  hImage,
    13276  hAllocation->GetMemory(),
    13277  0); //memoryOffset
    13278  break;
    13279  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13280  {
    13281  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13282  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13283  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13284  break;
    13285  }
    13286  default:
    13287  VMA_ASSERT(0);
    13288  }
    13289  return res;
    13290 }
    13291 
// Flushes or invalidates the given byte range of the allocation, but only for
// memory types that are not HOST_COHERENT (coherent memory needs neither).
// The range is expanded to nonCoherentAtomSize alignment as required by
// vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges, and clamped to
// the allocation (dedicated) or the whole block (block allocation).
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Align start down; size == VK_WHOLE_SIZE means "to the end".
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Align size up (accounting for the start having moved down),
                // but never past the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // The allocation's start inside the block is expected to already be
            // atom-aligned, so shifting the range keeps it aligned.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        // Dispatch the prepared range to the requested cache operation.
        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13367 
    13368 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13369 {
    13370  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13371 
    13372  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13373  {
    13374  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13375  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13376  VMA_ASSERT(pDedicatedAllocations);
    13377  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13378  VMA_ASSERT(success);
    13379  }
    13380 
    13381  VkDeviceMemory hMemory = allocation->GetMemory();
    13382 
    13383  /*
    13384  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13385  before vkFreeMemory.
    13386 
    13387  if(allocation->GetMappedData() != VMA_NULL)
    13388  {
    13389  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13390  }
    13391  */
    13392 
    13393  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13394 
    13395  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13396 }
    13397 
    13398 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13399 {
    13400  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13401  !hAllocation->CanBecomeLost() &&
    13402  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13403  {
    13404  void* pData = VMA_NULL;
    13405  VkResult res = Map(hAllocation, &pData);
    13406  if(res == VK_SUCCESS)
    13407  {
    13408  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13409  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13410  Unmap(hAllocation);
    13411  }
    13412  else
    13413  {
    13414  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13415  }
    13416  }
    13417 }
    13418 
    13419 #if VMA_STATS_STRING_ENABLED
    13420 
// Writes a detailed JSON map of all allocations into the given writer:
// "DedicatedAllocations" (per memory type), "DefaultPools" (per memory type),
// and "Pools" (custom pools, keyed by pool ID). Sections are emitted only if
// non-empty. Assumes the caller has already begun the enclosing JSON object.
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    // Dedicated allocations, grouped by memory type. The section header is
    // emitted lazily, only once the first non-empty type is found.
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default (per-memory-type) pools, again with a lazily emitted header.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Keyed by the pool's numeric ID.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13506 
    13507 #endif // #if VMA_STATS_STRING_ENABLED
    13508 
    13510 // Public interface
    13511 
// Creates the allocator object from *pCreateInfo and returns it in *pAllocator.
// Init()'s result is returned directly; on failure *pAllocator still holds the
// constructed-but-uninitialized object — NOTE(review): presumably the caller is
// expected to call vmaDestroyAllocator in that case; confirm against the docs.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13521 
// Destroys the allocator. Passing VK_NULL_HANDLE is a safe no-op.
void vmaDestroyAllocator(
    VmaAllocator allocator)
{
    if(allocator != VK_NULL_HANDLE)
    {
        VMA_DEBUG_LOG("vmaDestroyAllocator");
        // Copy the callbacks to the stack first: the allocator object that
        // owns them is about to be deleted using those very callbacks.
        VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
        vma_delete(&allocationCallbacks, allocator);
    }
}
    13532 
    13534  VmaAllocator allocator,
    13535  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13536 {
    13537  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13538  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13539 }
    13540 
    13542  VmaAllocator allocator,
    13543  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13544 {
    13545  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13546  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13547 }
    13548 
    13550  VmaAllocator allocator,
    13551  uint32_t memoryTypeIndex,
    13552  VkMemoryPropertyFlags* pFlags)
    13553 {
    13554  VMA_ASSERT(allocator && pFlags);
    13555  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13556  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13557 }
    13558 
    13560  VmaAllocator allocator,
    13561  uint32_t frameIndex)
    13562 {
    13563  VMA_ASSERT(allocator);
    13564  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13565 
    13566  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13567 
    13568  allocator->SetCurrentFrameIndex(frameIndex);
    13569 }
    13570 
// Calculates overall statistics of the allocator into *pStats.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13579 
    13580 #if VMA_STATS_STRING_ENABLED
    13581 
// Builds a null-terminated JSON statistics string describing the allocator:
// "Total" stats, then per-heap objects (size, flags, stats, and nested
// per-memory-type objects), and — when detailedMap is VK_TRUE — the full
// detailed allocation map. The returned string must be released with
// vmaFreeStatsString.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // The JSON writer is scoped so it finishes before the builder is read.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only when the heap actually holds blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest every memory type that belongs to this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the builder's buffer into a string allocated via the allocator's
    // callbacks, adding the terminating zero.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13689 
    13690 void vmaFreeStatsString(
    13691  VmaAllocator allocator,
    13692  char* pStatsString)
    13693 {
    13694  if(pStatsString != VMA_NULL)
    13695  {
    13696  VMA_ASSERT(allocator);
    13697  size_t len = strlen(pStatsString);
    13698  vma_delete_array(allocator, pStatsString, len + 1);
    13699  }
    13700 }
    13701 
    13702 #endif // #if VMA_STATS_STRING_ENABLED
    13703 
    13704 /*
    13705 This function is not protected by any mutex because it just reads immutable data.
    13706 */
    13707 VkResult vmaFindMemoryTypeIndex(
    13708  VmaAllocator allocator,
    13709  uint32_t memoryTypeBits,
    13710  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13711  uint32_t* pMemoryTypeIndex)
    13712 {
    13713  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13714  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13715  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13716 
    13717  if(pAllocationCreateInfo->memoryTypeBits != 0)
    13718  {
    13719  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    13720  }
    13721 
    13722  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    13723  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    13724 
    13725  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    13726  if(mapped)
    13727  {
    13728  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13729  }
    13730 
    13731  // Convert usage to requiredFlags and preferredFlags.
    13732  switch(pAllocationCreateInfo->usage)
    13733  {
    13735  break;
    13737  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13738  {
    13739  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13740  }
    13741  break;
    13743  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    13744  break;
    13746  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13747  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    13748  {
    13749  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    13750  }
    13751  break;
    13753  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    13754  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    13755  break;
    13756  default:
    13757  break;
    13758  }
    13759 
    13760  *pMemoryTypeIndex = UINT32_MAX;
    13761  uint32_t minCost = UINT32_MAX;
    13762  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    13763  memTypeIndex < allocator->GetMemoryTypeCount();
    13764  ++memTypeIndex, memTypeBit <<= 1)
    13765  {
    13766  // This memory type is acceptable according to memoryTypeBits bitmask.
    13767  if((memTypeBit & memoryTypeBits) != 0)
    13768  {
    13769  const VkMemoryPropertyFlags currFlags =
    13770  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    13771  // This memory type contains requiredFlags.
    13772  if((requiredFlags & ~currFlags) == 0)
    13773  {
    13774  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    13775  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    13776  // Remember memory type with lowest cost.
    13777  if(currCost < minCost)
    13778  {
    13779  *pMemoryTypeIndex = memTypeIndex;
    13780  if(currCost == 0)
    13781  {
    13782  return VK_SUCCESS;
    13783  }
    13784  minCost = currCost;
    13785  }
    13786  }
    13787  }
    13788  }
    13789  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    13790 }
    13791 
    13793  VmaAllocator allocator,
    13794  const VkBufferCreateInfo* pBufferCreateInfo,
    13795  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13796  uint32_t* pMemoryTypeIndex)
    13797 {
    13798  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13799  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13800  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13801  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13802 
    13803  const VkDevice hDev = allocator->m_hDevice;
    13804  VkBuffer hBuffer = VK_NULL_HANDLE;
    13805  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13806  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13807  if(res == VK_SUCCESS)
    13808  {
    13809  VkMemoryRequirements memReq = {};
    13810  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13811  hDev, hBuffer, &memReq);
    13812 
    13813  res = vmaFindMemoryTypeIndex(
    13814  allocator,
    13815  memReq.memoryTypeBits,
    13816  pAllocationCreateInfo,
    13817  pMemoryTypeIndex);
    13818 
    13819  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13820  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13821  }
    13822  return res;
    13823 }
    13824 
    13826  VmaAllocator allocator,
    13827  const VkImageCreateInfo* pImageCreateInfo,
    13828  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13829  uint32_t* pMemoryTypeIndex)
    13830 {
    13831  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13832  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13833  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13834  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13835 
    13836  const VkDevice hDev = allocator->m_hDevice;
    13837  VkImage hImage = VK_NULL_HANDLE;
    13838  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13839  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13840  if(res == VK_SUCCESS)
    13841  {
    13842  VkMemoryRequirements memReq = {};
    13843  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13844  hDev, hImage, &memReq);
    13845 
    13846  res = vmaFindMemoryTypeIndex(
    13847  allocator,
    13848  memReq.memoryTypeBits,
    13849  pAllocationCreateInfo,
    13850  pMemoryTypeIndex);
    13851 
    13852  allocator->GetVulkanFunctions().vkDestroyImage(
    13853  hDev, hImage, allocator->GetAllocationCallbacks());
    13854  }
    13855  return res;
    13856 }
    13857 
    13858 VkResult vmaCreatePool(
    13859  VmaAllocator allocator,
    13860  const VmaPoolCreateInfo* pCreateInfo,
    13861  VmaPool* pPool)
    13862 {
    13863  VMA_ASSERT(allocator && pCreateInfo && pPool);
    13864 
    13865  VMA_DEBUG_LOG("vmaCreatePool");
    13866 
    13867  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13868 
    13869  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    13870 
    13871 #if VMA_RECORDING_ENABLED
    13872  if(allocator->GetRecorder() != VMA_NULL)
    13873  {
    13874  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    13875  }
    13876 #endif
    13877 
    13878  return res;
    13879 }
    13880 
    13881 void vmaDestroyPool(
    13882  VmaAllocator allocator,
    13883  VmaPool pool)
    13884 {
    13885  VMA_ASSERT(allocator);
    13886 
    13887  if(pool == VK_NULL_HANDLE)
    13888  {
    13889  return;
    13890  }
    13891 
    13892  VMA_DEBUG_LOG("vmaDestroyPool");
    13893 
    13894  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13895 
    13896 #if VMA_RECORDING_ENABLED
    13897  if(allocator->GetRecorder() != VMA_NULL)
    13898  {
    13899  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    13900  }
    13901 #endif
    13902 
    13903  allocator->DestroyPool(pool);
    13904 }
    13905 
    13906 void vmaGetPoolStats(
    13907  VmaAllocator allocator,
    13908  VmaPool pool,
    13909  VmaPoolStats* pPoolStats)
    13910 {
    13911  VMA_ASSERT(allocator && pool && pPoolStats);
    13912 
    13913  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13914 
    13915  allocator->GetPoolStats(pool, pPoolStats);
    13916 }
    13917 
    13919  VmaAllocator allocator,
    13920  VmaPool pool,
    13921  size_t* pLostAllocationCount)
    13922 {
    13923  VMA_ASSERT(allocator && pool);
    13924 
    13925  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13926 
    13927 #if VMA_RECORDING_ENABLED
    13928  if(allocator->GetRecorder() != VMA_NULL)
    13929  {
    13930  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13931  }
    13932 #endif
    13933 
    13934  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13935 }
    13936 
    13937 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    13938 {
    13939  VMA_ASSERT(allocator && pool);
    13940 
    13941  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13942 
    13943  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    13944 
    13945  return allocator->CheckPoolCorruption(pool);
    13946 }
    13947 
    13948 VkResult vmaAllocateMemory(
    13949  VmaAllocator allocator,
    13950  const VkMemoryRequirements* pVkMemoryRequirements,
    13951  const VmaAllocationCreateInfo* pCreateInfo,
    13952  VmaAllocation* pAllocation,
    13953  VmaAllocationInfo* pAllocationInfo)
    13954 {
    13955  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    13956 
    13957  VMA_DEBUG_LOG("vmaAllocateMemory");
    13958 
    13959  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13960 
    13961  VkResult result = allocator->AllocateMemory(
    13962  *pVkMemoryRequirements,
    13963  false, // requiresDedicatedAllocation
    13964  false, // prefersDedicatedAllocation
    13965  VK_NULL_HANDLE, // dedicatedBuffer
    13966  VK_NULL_HANDLE, // dedicatedImage
    13967  *pCreateInfo,
    13968  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    13969  pAllocation);
    13970 
    13971 #if VMA_RECORDING_ENABLED
    13972  if(allocator->GetRecorder() != VMA_NULL)
    13973  {
    13974  allocator->GetRecorder()->RecordAllocateMemory(
    13975  allocator->GetCurrentFrameIndex(),
    13976  *pVkMemoryRequirements,
    13977  *pCreateInfo,
    13978  *pAllocation);
    13979  }
    13980 #endif
    13981 
    13982  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    13983  {
    13984  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    13985  }
    13986 
    13987  return result;
    13988 }
    13989 
    13991  VmaAllocator allocator,
    13992  VkBuffer buffer,
    13993  const VmaAllocationCreateInfo* pCreateInfo,
    13994  VmaAllocation* pAllocation,
    13995  VmaAllocationInfo* pAllocationInfo)
    13996 {
    13997  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13998 
    13999  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    14000 
    14001  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14002 
    14003  VkMemoryRequirements vkMemReq = {};
    14004  bool requiresDedicatedAllocation = false;
    14005  bool prefersDedicatedAllocation = false;
    14006  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14007  requiresDedicatedAllocation,
    14008  prefersDedicatedAllocation);
    14009 
    14010  VkResult result = allocator->AllocateMemory(
    14011  vkMemReq,
    14012  requiresDedicatedAllocation,
    14013  prefersDedicatedAllocation,
    14014  buffer, // dedicatedBuffer
    14015  VK_NULL_HANDLE, // dedicatedImage
    14016  *pCreateInfo,
    14017  VMA_SUBALLOCATION_TYPE_BUFFER,
    14018  pAllocation);
    14019 
    14020 #if VMA_RECORDING_ENABLED
    14021  if(allocator->GetRecorder() != VMA_NULL)
    14022  {
    14023  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14024  allocator->GetCurrentFrameIndex(),
    14025  vkMemReq,
    14026  requiresDedicatedAllocation,
    14027  prefersDedicatedAllocation,
    14028  *pCreateInfo,
    14029  *pAllocation);
    14030  }
    14031 #endif
    14032 
    14033  if(pAllocationInfo && result == VK_SUCCESS)
    14034  {
    14035  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14036  }
    14037 
    14038  return result;
    14039 }
    14040 
    14041 VkResult vmaAllocateMemoryForImage(
    14042  VmaAllocator allocator,
    14043  VkImage image,
    14044  const VmaAllocationCreateInfo* pCreateInfo,
    14045  VmaAllocation* pAllocation,
    14046  VmaAllocationInfo* pAllocationInfo)
    14047 {
    14048  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14049 
    14050  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    14051 
    14052  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14053 
    14054  VkMemoryRequirements vkMemReq = {};
    14055  bool requiresDedicatedAllocation = false;
    14056  bool prefersDedicatedAllocation = false;
    14057  allocator->GetImageMemoryRequirements(image, vkMemReq,
    14058  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14059 
    14060  VkResult result = allocator->AllocateMemory(
    14061  vkMemReq,
    14062  requiresDedicatedAllocation,
    14063  prefersDedicatedAllocation,
    14064  VK_NULL_HANDLE, // dedicatedBuffer
    14065  image, // dedicatedImage
    14066  *pCreateInfo,
    14067  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    14068  pAllocation);
    14069 
    14070 #if VMA_RECORDING_ENABLED
    14071  if(allocator->GetRecorder() != VMA_NULL)
    14072  {
    14073  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    14074  allocator->GetCurrentFrameIndex(),
    14075  vkMemReq,
    14076  requiresDedicatedAllocation,
    14077  prefersDedicatedAllocation,
    14078  *pCreateInfo,
    14079  *pAllocation);
    14080  }
    14081 #endif
    14082 
    14083  if(pAllocationInfo && result == VK_SUCCESS)
    14084  {
    14085  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14086  }
    14087 
    14088  return result;
    14089 }
    14090 
    14091 void vmaFreeMemory(
    14092  VmaAllocator allocator,
    14093  VmaAllocation allocation)
    14094 {
    14095  VMA_ASSERT(allocator);
    14096 
    14097  if(allocation == VK_NULL_HANDLE)
    14098  {
    14099  return;
    14100  }
    14101 
    14102  VMA_DEBUG_LOG("vmaFreeMemory");
    14103 
    14104  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14105 
    14106 #if VMA_RECORDING_ENABLED
    14107  if(allocator->GetRecorder() != VMA_NULL)
    14108  {
    14109  allocator->GetRecorder()->RecordFreeMemory(
    14110  allocator->GetCurrentFrameIndex(),
    14111  allocation);
    14112  }
    14113 #endif
    14114 
    14115  allocator->FreeMemory(allocation);
    14116 }
    14117 
    14118 VkResult vmaResizeAllocation(
    14119  VmaAllocator allocator,
    14120  VmaAllocation allocation,
    14121  VkDeviceSize newSize)
    14122 {
    14123  VMA_ASSERT(allocator && allocation);
    14124 
    14125  VMA_DEBUG_LOG("vmaResizeAllocation");
    14126 
    14127  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14128 
    14129 #if VMA_RECORDING_ENABLED
    14130  if(allocator->GetRecorder() != VMA_NULL)
    14131  {
    14132  allocator->GetRecorder()->RecordResizeAllocation(
    14133  allocator->GetCurrentFrameIndex(),
    14134  allocation,
    14135  newSize);
    14136  }
    14137 #endif
    14138 
    14139  return allocator->ResizeAllocation(allocation, newSize);
    14140 }
    14141 
    14143  VmaAllocator allocator,
    14144  VmaAllocation allocation,
    14145  VmaAllocationInfo* pAllocationInfo)
    14146 {
    14147  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14148 
    14149  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14150 
    14151 #if VMA_RECORDING_ENABLED
    14152  if(allocator->GetRecorder() != VMA_NULL)
    14153  {
    14154  allocator->GetRecorder()->RecordGetAllocationInfo(
    14155  allocator->GetCurrentFrameIndex(),
    14156  allocation);
    14157  }
    14158 #endif
    14159 
    14160  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14161 }
    14162 
    14163 VkBool32 vmaTouchAllocation(
    14164  VmaAllocator allocator,
    14165  VmaAllocation allocation)
    14166 {
    14167  VMA_ASSERT(allocator && allocation);
    14168 
    14169  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14170 
    14171 #if VMA_RECORDING_ENABLED
    14172  if(allocator->GetRecorder() != VMA_NULL)
    14173  {
    14174  allocator->GetRecorder()->RecordTouchAllocation(
    14175  allocator->GetCurrentFrameIndex(),
    14176  allocation);
    14177  }
    14178 #endif
    14179 
    14180  return allocator->TouchAllocation(allocation);
    14181 }
    14182 
    14184  VmaAllocator allocator,
    14185  VmaAllocation allocation,
    14186  void* pUserData)
    14187 {
    14188  VMA_ASSERT(allocator && allocation);
    14189 
    14190  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14191 
    14192  allocation->SetUserData(allocator, pUserData);
    14193 
    14194 #if VMA_RECORDING_ENABLED
    14195  if(allocator->GetRecorder() != VMA_NULL)
    14196  {
    14197  allocator->GetRecorder()->RecordSetAllocationUserData(
    14198  allocator->GetCurrentFrameIndex(),
    14199  allocation,
    14200  pUserData);
    14201  }
    14202 #endif
    14203 }
    14204 
    14206  VmaAllocator allocator,
    14207  VmaAllocation* pAllocation)
    14208 {
    14209  VMA_ASSERT(allocator && pAllocation);
    14210 
    14211  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14212 
    14213  allocator->CreateLostAllocation(pAllocation);
    14214 
    14215 #if VMA_RECORDING_ENABLED
    14216  if(allocator->GetRecorder() != VMA_NULL)
    14217  {
    14218  allocator->GetRecorder()->RecordCreateLostAllocation(
    14219  allocator->GetCurrentFrameIndex(),
    14220  *pAllocation);
    14221  }
    14222 #endif
    14223 }
    14224 
    14225 VkResult vmaMapMemory(
    14226  VmaAllocator allocator,
    14227  VmaAllocation allocation,
    14228  void** ppData)
    14229 {
    14230  VMA_ASSERT(allocator && allocation && ppData);
    14231 
    14232  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14233 
    14234  VkResult res = allocator->Map(allocation, ppData);
    14235 
    14236 #if VMA_RECORDING_ENABLED
    14237  if(allocator->GetRecorder() != VMA_NULL)
    14238  {
    14239  allocator->GetRecorder()->RecordMapMemory(
    14240  allocator->GetCurrentFrameIndex(),
    14241  allocation);
    14242  }
    14243 #endif
    14244 
    14245  return res;
    14246 }
    14247 
    14248 void vmaUnmapMemory(
    14249  VmaAllocator allocator,
    14250  VmaAllocation allocation)
    14251 {
    14252  VMA_ASSERT(allocator && allocation);
    14253 
    14254  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14255 
    14256 #if VMA_RECORDING_ENABLED
    14257  if(allocator->GetRecorder() != VMA_NULL)
    14258  {
    14259  allocator->GetRecorder()->RecordUnmapMemory(
    14260  allocator->GetCurrentFrameIndex(),
    14261  allocation);
    14262  }
    14263 #endif
    14264 
    14265  allocator->Unmap(allocation);
    14266 }
    14267 
    14268 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14269 {
    14270  VMA_ASSERT(allocator && allocation);
    14271 
    14272  VMA_DEBUG_LOG("vmaFlushAllocation");
    14273 
    14274  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14275 
    14276  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14277 
    14278 #if VMA_RECORDING_ENABLED
    14279  if(allocator->GetRecorder() != VMA_NULL)
    14280  {
    14281  allocator->GetRecorder()->RecordFlushAllocation(
    14282  allocator->GetCurrentFrameIndex(),
    14283  allocation, offset, size);
    14284  }
    14285 #endif
    14286 }
    14287 
    14288 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14289 {
    14290  VMA_ASSERT(allocator && allocation);
    14291 
    14292  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14293 
    14294  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14295 
    14296  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14297 
    14298 #if VMA_RECORDING_ENABLED
    14299  if(allocator->GetRecorder() != VMA_NULL)
    14300  {
    14301  allocator->GetRecorder()->RecordInvalidateAllocation(
    14302  allocator->GetCurrentFrameIndex(),
    14303  allocation, offset, size);
    14304  }
    14305 #endif
    14306 }
    14307 
    14308 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14309 {
    14310  VMA_ASSERT(allocator);
    14311 
    14312  VMA_DEBUG_LOG("vmaCheckCorruption");
    14313 
    14314  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14315 
    14316  return allocator->CheckCorruption(memoryTypeBits);
    14317 }
    14318 
    14319 VkResult vmaDefragment(
    14320  VmaAllocator allocator,
    14321  VmaAllocation* pAllocations,
    14322  size_t allocationCount,
    14323  VkBool32* pAllocationsChanged,
    14324  const VmaDefragmentationInfo *pDefragmentationInfo,
    14325  VmaDefragmentationStats* pDefragmentationStats)
    14326 {
    14327  VMA_ASSERT(allocator && pAllocations);
    14328 
    14329  VMA_DEBUG_LOG("vmaDefragment");
    14330 
    14331  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14332 
    14333  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14334 }
    14335 
    14336 VkResult vmaBindBufferMemory(
    14337  VmaAllocator allocator,
    14338  VmaAllocation allocation,
    14339  VkBuffer buffer)
    14340 {
    14341  VMA_ASSERT(allocator && allocation && buffer);
    14342 
    14343  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14344 
    14345  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14346 
    14347  return allocator->BindBufferMemory(allocation, buffer);
    14348 }
    14349 
    14350 VkResult vmaBindImageMemory(
    14351  VmaAllocator allocator,
    14352  VmaAllocation allocation,
    14353  VkImage image)
    14354 {
    14355  VMA_ASSERT(allocator && allocation && image);
    14356 
    14357  VMA_DEBUG_LOG("vmaBindImageMemory");
    14358 
    14359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14360 
    14361  return allocator->BindImageMemory(allocation, image);
    14362 }
    14363 
    14364 VkResult vmaCreateBuffer(
    14365  VmaAllocator allocator,
    14366  const VkBufferCreateInfo* pBufferCreateInfo,
    14367  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14368  VkBuffer* pBuffer,
    14369  VmaAllocation* pAllocation,
    14370  VmaAllocationInfo* pAllocationInfo)
    14371 {
    14372  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14373 
    14374  if(pBufferCreateInfo->size == 0)
    14375  {
    14376  return VK_ERROR_VALIDATION_FAILED_EXT;
    14377  }
    14378 
    14379  VMA_DEBUG_LOG("vmaCreateBuffer");
    14380 
    14381  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14382 
    14383  *pBuffer = VK_NULL_HANDLE;
    14384  *pAllocation = VK_NULL_HANDLE;
    14385 
    14386  // 1. Create VkBuffer.
    14387  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14388  allocator->m_hDevice,
    14389  pBufferCreateInfo,
    14390  allocator->GetAllocationCallbacks(),
    14391  pBuffer);
    14392  if(res >= 0)
    14393  {
    14394  // 2. vkGetBufferMemoryRequirements.
    14395  VkMemoryRequirements vkMemReq = {};
    14396  bool requiresDedicatedAllocation = false;
    14397  bool prefersDedicatedAllocation = false;
    14398  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14399  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14400 
    14401  // Make sure alignment requirements for specific buffer usages reported
    14402  // in Physical Device Properties are included in alignment reported by memory requirements.
    14403  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14404  {
    14405  VMA_ASSERT(vkMemReq.alignment %
    14406  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14407  }
    14408  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14409  {
    14410  VMA_ASSERT(vkMemReq.alignment %
    14411  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14412  }
    14413  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14414  {
    14415  VMA_ASSERT(vkMemReq.alignment %
    14416  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14417  }
    14418 
    14419  // 3. Allocate memory using allocator.
    14420  res = allocator->AllocateMemory(
    14421  vkMemReq,
    14422  requiresDedicatedAllocation,
    14423  prefersDedicatedAllocation,
    14424  *pBuffer, // dedicatedBuffer
    14425  VK_NULL_HANDLE, // dedicatedImage
    14426  *pAllocationCreateInfo,
    14427  VMA_SUBALLOCATION_TYPE_BUFFER,
    14428  pAllocation);
    14429 
    14430 #if VMA_RECORDING_ENABLED
    14431  if(allocator->GetRecorder() != VMA_NULL)
    14432  {
    14433  allocator->GetRecorder()->RecordCreateBuffer(
    14434  allocator->GetCurrentFrameIndex(),
    14435  *pBufferCreateInfo,
    14436  *pAllocationCreateInfo,
    14437  *pAllocation);
    14438  }
    14439 #endif
    14440 
    14441  if(res >= 0)
    14442  {
    14443  // 3. Bind buffer with memory.
    14444  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14445  if(res >= 0)
    14446  {
    14447  // All steps succeeded.
    14448  #if VMA_STATS_STRING_ENABLED
    14449  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14450  #endif
    14451  if(pAllocationInfo != VMA_NULL)
    14452  {
    14453  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14454  }
    14455 
    14456  return VK_SUCCESS;
    14457  }
    14458  allocator->FreeMemory(*pAllocation);
    14459  *pAllocation = VK_NULL_HANDLE;
    14460  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14461  *pBuffer = VK_NULL_HANDLE;
    14462  return res;
    14463  }
    14464  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14465  *pBuffer = VK_NULL_HANDLE;
    14466  return res;
    14467  }
    14468  return res;
    14469 }
    14470 
    14471 void vmaDestroyBuffer(
    14472  VmaAllocator allocator,
    14473  VkBuffer buffer,
    14474  VmaAllocation allocation)
    14475 {
    14476  VMA_ASSERT(allocator);
    14477 
    14478  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14479  {
    14480  return;
    14481  }
    14482 
    14483  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14484 
    14485  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14486 
    14487 #if VMA_RECORDING_ENABLED
    14488  if(allocator->GetRecorder() != VMA_NULL)
    14489  {
    14490  allocator->GetRecorder()->RecordDestroyBuffer(
    14491  allocator->GetCurrentFrameIndex(),
    14492  allocation);
    14493  }
    14494 #endif
    14495 
    14496  if(buffer != VK_NULL_HANDLE)
    14497  {
    14498  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14499  }
    14500 
    14501  if(allocation != VK_NULL_HANDLE)
    14502  {
    14503  allocator->FreeMemory(allocation);
    14504  }
    14505 }
    14506 
    14507 VkResult vmaCreateImage(
    14508  VmaAllocator allocator,
    14509  const VkImageCreateInfo* pImageCreateInfo,
    14510  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14511  VkImage* pImage,
    14512  VmaAllocation* pAllocation,
    14513  VmaAllocationInfo* pAllocationInfo)
    14514 {
    14515  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14516 
    14517  if(pImageCreateInfo->extent.width == 0 ||
    14518  pImageCreateInfo->extent.height == 0 ||
    14519  pImageCreateInfo->extent.depth == 0 ||
    14520  pImageCreateInfo->mipLevels == 0 ||
    14521  pImageCreateInfo->arrayLayers == 0)
    14522  {
    14523  return VK_ERROR_VALIDATION_FAILED_EXT;
    14524  }
    14525 
    14526  VMA_DEBUG_LOG("vmaCreateImage");
    14527 
    14528  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14529 
    14530  *pImage = VK_NULL_HANDLE;
    14531  *pAllocation = VK_NULL_HANDLE;
    14532 
    14533  // 1. Create VkImage.
    14534  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14535  allocator->m_hDevice,
    14536  pImageCreateInfo,
    14537  allocator->GetAllocationCallbacks(),
    14538  pImage);
    14539  if(res >= 0)
    14540  {
    14541  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14542  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14543  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14544 
    14545  // 2. Allocate memory using allocator.
    14546  VkMemoryRequirements vkMemReq = {};
    14547  bool requiresDedicatedAllocation = false;
    14548  bool prefersDedicatedAllocation = false;
    14549  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14550  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14551 
    14552  res = allocator->AllocateMemory(
    14553  vkMemReq,
    14554  requiresDedicatedAllocation,
    14555  prefersDedicatedAllocation,
    14556  VK_NULL_HANDLE, // dedicatedBuffer
    14557  *pImage, // dedicatedImage
    14558  *pAllocationCreateInfo,
    14559  suballocType,
    14560  pAllocation);
    14561 
    14562 #if VMA_RECORDING_ENABLED
    14563  if(allocator->GetRecorder() != VMA_NULL)
    14564  {
    14565  allocator->GetRecorder()->RecordCreateImage(
    14566  allocator->GetCurrentFrameIndex(),
    14567  *pImageCreateInfo,
    14568  *pAllocationCreateInfo,
    14569  *pAllocation);
    14570  }
    14571 #endif
    14572 
    14573  if(res >= 0)
    14574  {
    14575  // 3. Bind image with memory.
    14576  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14577  if(res >= 0)
    14578  {
    14579  // All steps succeeded.
    14580  #if VMA_STATS_STRING_ENABLED
    14581  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14582  #endif
    14583  if(pAllocationInfo != VMA_NULL)
    14584  {
    14585  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14586  }
    14587 
    14588  return VK_SUCCESS;
    14589  }
    14590  allocator->FreeMemory(*pAllocation);
    14591  *pAllocation = VK_NULL_HANDLE;
    14592  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14593  *pImage = VK_NULL_HANDLE;
    14594  return res;
    14595  }
    14596  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14597  *pImage = VK_NULL_HANDLE;
    14598  return res;
    14599  }
    14600  return res;
    14601 }
    14602 
    14603 void vmaDestroyImage(
    14604  VmaAllocator allocator,
    14605  VkImage image,
    14606  VmaAllocation allocation)
    14607 {
    14608  VMA_ASSERT(allocator);
    14609 
    14610  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14611  {
    14612  return;
    14613  }
    14614 
    14615  VMA_DEBUG_LOG("vmaDestroyImage");
    14616 
    14617  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14618 
    14619 #if VMA_RECORDING_ENABLED
    14620  if(allocator->GetRecorder() != VMA_NULL)
    14621  {
    14622  allocator->GetRecorder()->RecordDestroyImage(
    14623  allocator->GetCurrentFrameIndex(),
    14624  allocation);
    14625  }
    14626 #endif
    14627 
    14628  if(image != VK_NULL_HANDLE)
    14629  {
    14630  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14631  }
    14632  if(allocation != VK_NULL_HANDLE)
    14633  {
    14634  allocator->FreeMemory(allocation);
    14635  }
    14636 }
    14637 
    14638 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1586
    -
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1887
    +Go to the documentation of this file.
    1 //
    2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1479 /*
    1480 Define this macro to 0/1 to disable/enable support for recording functionality,
    1481 available through VmaAllocatorCreateInfo::pRecordSettings.
    1482 */
    1483 #ifndef VMA_RECORDING_ENABLED
    1484  #ifdef _WIN32
    1485  #define VMA_RECORDING_ENABLED 1
    1486  #else
    1487  #define VMA_RECORDING_ENABLED 0
    1488  #endif
    1489 #endif
    1490 
    1491 #ifndef NOMINMAX
    1492  #define NOMINMAX // For windows.h
    1493 #endif
    1494 
    1495 #include <vulkan/vulkan.h>
    1496 
    1497 #if VMA_RECORDING_ENABLED
    1498  #include <windows.h>
    1499 #endif
    1500 
    1501 #if !defined(VMA_DEDICATED_ALLOCATION)
    1502  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1503  #define VMA_DEDICATED_ALLOCATION 1
    1504  #else
    1505  #define VMA_DEDICATED_ALLOCATION 0
    1506  #endif
    1507 #endif
    1508 
    1518 VK_DEFINE_HANDLE(VmaAllocator)
    1519 
    1520 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1522  VmaAllocator allocator,
    1523  uint32_t memoryType,
    1524  VkDeviceMemory memory,
    1525  VkDeviceSize size);
    1527 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1528  VmaAllocator allocator,
    1529  uint32_t memoryType,
    1530  VkDeviceMemory memory,
    1531  VkDeviceSize size);
    1532 
    1546 
    1576 
    1579 typedef VkFlags VmaAllocatorCreateFlags;
    1580 
    1585 typedef struct VmaVulkanFunctions {
    1586  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1587  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1588  PFN_vkAllocateMemory vkAllocateMemory;
    1589  PFN_vkFreeMemory vkFreeMemory;
    1590  PFN_vkMapMemory vkMapMemory;
    1591  PFN_vkUnmapMemory vkUnmapMemory;
    1592  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1593  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1594  PFN_vkBindBufferMemory vkBindBufferMemory;
    1595  PFN_vkBindImageMemory vkBindImageMemory;
    1596  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1597  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1598  PFN_vkCreateBuffer vkCreateBuffer;
    1599  PFN_vkDestroyBuffer vkDestroyBuffer;
    1600  PFN_vkCreateImage vkCreateImage;
    1601  PFN_vkDestroyImage vkDestroyImage;
    1602 #if VMA_DEDICATED_ALLOCATION
    1603  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1604  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1605 #endif
    1607 
    1609 typedef enum VmaRecordFlagBits {
    1616 
    1619 typedef VkFlags VmaRecordFlags;
    1620 
    1622 typedef struct VmaRecordSettings
    1623 {
    1633  const char* pFilePath;
    1635 
    1638 {
    1642 
    1643  VkPhysicalDevice physicalDevice;
    1645 
    1646  VkDevice device;
    1648 
    1651 
    1652  const VkAllocationCallbacks* pAllocationCallbacks;
    1654 
    1694  const VkDeviceSize* pHeapSizeLimit;
    1715 
    1717 VkResult vmaCreateAllocator(
    1718  const VmaAllocatorCreateInfo* pCreateInfo,
    1719  VmaAllocator* pAllocator);
    1720 
    1722 void vmaDestroyAllocator(
    1723  VmaAllocator allocator);
    1724 
    1730  VmaAllocator allocator,
    1731  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1732 
    1738  VmaAllocator allocator,
    1739  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1740 
    1748  VmaAllocator allocator,
    1749  uint32_t memoryTypeIndex,
    1750  VkMemoryPropertyFlags* pFlags);
    1751 
    1761  VmaAllocator allocator,
    1762  uint32_t frameIndex);
    1763 
    1766 typedef struct VmaStatInfo
    1767 {
    1769  uint32_t blockCount;
    1775  VkDeviceSize usedBytes;
    1777  VkDeviceSize unusedBytes;
    1780 } VmaStatInfo;
    1781 
    1783 typedef struct VmaStats
    1784 {
    1785  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1786  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1788 } VmaStats;
    1789 
    1791 void vmaCalculateStats(
    1792  VmaAllocator allocator,
    1793  VmaStats* pStats);
    1794 
    1795 #define VMA_STATS_STRING_ENABLED 1
    1796 
    1797 #if VMA_STATS_STRING_ENABLED
    1798 
    1800 
    1802 void vmaBuildStatsString(
    1803  VmaAllocator allocator,
    1804  char** ppStatsString,
    1805  VkBool32 detailedMap);
    1806 
    1807 void vmaFreeStatsString(
    1808  VmaAllocator allocator,
    1809  char* pStatsString);
    1810 
    1811 #endif // #if VMA_STATS_STRING_ENABLED
    1812 
    1821 VK_DEFINE_HANDLE(VmaPool)
    1822 
    1823 typedef enum VmaMemoryUsage
    1824 {
    1873 } VmaMemoryUsage;
    1874 
    1889 
    1944 
    1960 
    1970 
    1977 
    1981 
    1983 {
    1996  VkMemoryPropertyFlags requiredFlags;
    2001  VkMemoryPropertyFlags preferredFlags;
    2009  uint32_t memoryTypeBits;
    2022  void* pUserData;
    2024 
    2041 VkResult vmaFindMemoryTypeIndex(
    2042  VmaAllocator allocator,
    2043  uint32_t memoryTypeBits,
    2044  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2045  uint32_t* pMemoryTypeIndex);
    2046 
    2060  VmaAllocator allocator,
    2061  const VkBufferCreateInfo* pBufferCreateInfo,
    2062  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2063  uint32_t* pMemoryTypeIndex);
    2064 
    2078  VmaAllocator allocator,
    2079  const VkImageCreateInfo* pImageCreateInfo,
    2080  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2081  uint32_t* pMemoryTypeIndex);
    2082 
    2103 
    2120 
    2131 
    2137 
    2140 typedef VkFlags VmaPoolCreateFlags;
    2141 
    2144 typedef struct VmaPoolCreateInfo {
    2159  VkDeviceSize blockSize;
    2188 
    2191 typedef struct VmaPoolStats {
    2194  VkDeviceSize size;
    2197  VkDeviceSize unusedSize;
    2210  VkDeviceSize unusedRangeSizeMax;
    2213  size_t blockCount;
    2214 } VmaPoolStats;
    2215 
    2222 VkResult vmaCreatePool(
    2223  VmaAllocator allocator,
    2224  const VmaPoolCreateInfo* pCreateInfo,
    2225  VmaPool* pPool);
    2226 
    2229 void vmaDestroyPool(
    2230  VmaAllocator allocator,
    2231  VmaPool pool);
    2232 
    2239 void vmaGetPoolStats(
    2240  VmaAllocator allocator,
    2241  VmaPool pool,
    2242  VmaPoolStats* pPoolStats);
    2243 
    2251  VmaAllocator allocator,
    2252  VmaPool pool,
    2253  size_t* pLostAllocationCount);
    2254 
    2269 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2270 
    2295 VK_DEFINE_HANDLE(VmaAllocation)
    2296 
    2297 
    2299 typedef struct VmaAllocationInfo {
    2304  uint32_t memoryType;
    2313  VkDeviceMemory deviceMemory;
    2318  VkDeviceSize offset;
    2323  VkDeviceSize size;
    2337  void* pUserData;
    2339 
    2350 VkResult vmaAllocateMemory(
    2351  VmaAllocator allocator,
    2352  const VkMemoryRequirements* pVkMemoryRequirements,
    2353  const VmaAllocationCreateInfo* pCreateInfo,
    2354  VmaAllocation* pAllocation,
    2355  VmaAllocationInfo* pAllocationInfo);
    2356 
    2364  VmaAllocator allocator,
    2365  VkBuffer buffer,
    2366  const VmaAllocationCreateInfo* pCreateInfo,
    2367  VmaAllocation* pAllocation,
    2368  VmaAllocationInfo* pAllocationInfo);
    2369 
    2371 VkResult vmaAllocateMemoryForImage(
    2372  VmaAllocator allocator,
    2373  VkImage image,
    2374  const VmaAllocationCreateInfo* pCreateInfo,
    2375  VmaAllocation* pAllocation,
    2376  VmaAllocationInfo* pAllocationInfo);
    2377 
    2379 void vmaFreeMemory(
    2380  VmaAllocator allocator,
    2381  VmaAllocation allocation);
    2382 
    2403 VkResult vmaResizeAllocation(
    2404  VmaAllocator allocator,
    2405  VmaAllocation allocation,
    2406  VkDeviceSize newSize);
    2407 
    2425  VmaAllocator allocator,
    2426  VmaAllocation allocation,
    2427  VmaAllocationInfo* pAllocationInfo);
    2428 
    2443 VkBool32 vmaTouchAllocation(
    2444  VmaAllocator allocator,
    2445  VmaAllocation allocation);
    2446 
    2461  VmaAllocator allocator,
    2462  VmaAllocation allocation,
    2463  void* pUserData);
    2464 
    2476  VmaAllocator allocator,
    2477  VmaAllocation* pAllocation);
    2478 
    2513 VkResult vmaMapMemory(
    2514  VmaAllocator allocator,
    2515  VmaAllocation allocation,
    2516  void** ppData);
    2517 
    2522 void vmaUnmapMemory(
    2523  VmaAllocator allocator,
    2524  VmaAllocation allocation);
    2525 
    2538 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2539 
    2552 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2553 
    2570 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2571 
    2573 typedef struct VmaDefragmentationInfo {
    2578  VkDeviceSize maxBytesToMove;
    2585 
    2587 typedef struct VmaDefragmentationStats {
    2589  VkDeviceSize bytesMoved;
    2591  VkDeviceSize bytesFreed;
    2597 
    2636 VkResult vmaDefragment(
    2637  VmaAllocator allocator,
    2638  VmaAllocation* pAllocations,
    2639  size_t allocationCount,
    2640  VkBool32* pAllocationsChanged,
    2641  const VmaDefragmentationInfo *pDefragmentationInfo,
    2642  VmaDefragmentationStats* pDefragmentationStats);
    2643 
    2656 VkResult vmaBindBufferMemory(
    2657  VmaAllocator allocator,
    2658  VmaAllocation allocation,
    2659  VkBuffer buffer);
    2660 
    2673 VkResult vmaBindImageMemory(
    2674  VmaAllocator allocator,
    2675  VmaAllocation allocation,
    2676  VkImage image);
    2677 
    2704 VkResult vmaCreateBuffer(
    2705  VmaAllocator allocator,
    2706  const VkBufferCreateInfo* pBufferCreateInfo,
    2707  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2708  VkBuffer* pBuffer,
    2709  VmaAllocation* pAllocation,
    2710  VmaAllocationInfo* pAllocationInfo);
    2711 
    2723 void vmaDestroyBuffer(
    2724  VmaAllocator allocator,
    2725  VkBuffer buffer,
    2726  VmaAllocation allocation);
    2727 
    2729 VkResult vmaCreateImage(
    2730  VmaAllocator allocator,
    2731  const VkImageCreateInfo* pImageCreateInfo,
    2732  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2733  VkImage* pImage,
    2734  VmaAllocation* pAllocation,
    2735  VmaAllocationInfo* pAllocationInfo);
    2736 
    2748 void vmaDestroyImage(
    2749  VmaAllocator allocator,
    2750  VkImage image,
    2751  VmaAllocation allocation);
    2752 
    2753 #ifdef __cplusplus
    2754 }
    2755 #endif
    2756 
    2757 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    2758 
    2759 // For Visual Studio IntelliSense.
    2760 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    2761 #define VMA_IMPLEMENTATION
    2762 #endif
    2763 
    2764 #ifdef VMA_IMPLEMENTATION
    2765 #undef VMA_IMPLEMENTATION
    2766 
    2767 #include <cstdint>
    2768 #include <cstdlib>
    2769 #include <cstring>
    2770 
    2771 /*******************************************************************************
    2772 CONFIGURATION SECTION
    2773 
    2774 Define some of these macros before each #include of this header or change them
    2775 here if you need other then default behavior depending on your environment.
    2776 */
    2777 
    2778 /*
    2779 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    2780 internally, like:
    2781 
    2782  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    2783 
    2784 Define to 0 if you are going to provide you own pointers to Vulkan functions via
    2785 VmaAllocatorCreateInfo::pVulkanFunctions.
    2786 */
    2787 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    2788 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    2789 #endif
    2790 
    2791 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    2792 //#define VMA_USE_STL_CONTAINERS 1
    2793 
    2794 /* Set this macro to 1 to make the library including and using STL containers:
    2795 std::pair, std::vector, std::list, std::unordered_map.
    2796 
    2797 Set it to 0 or undefined to make the library using its own implementation of
    2798 the containers.
    2799 */
    2800 #if VMA_USE_STL_CONTAINERS
    2801  #define VMA_USE_STL_VECTOR 1
    2802  #define VMA_USE_STL_UNORDERED_MAP 1
    2803  #define VMA_USE_STL_LIST 1
    2804 #endif
    2805 
    2806 #if VMA_USE_STL_VECTOR
    2807  #include <vector>
    2808 #endif
    2809 
    2810 #if VMA_USE_STL_UNORDERED_MAP
    2811  #include <unordered_map>
    2812 #endif
    2813 
    2814 #if VMA_USE_STL_LIST
    2815  #include <list>
    2816 #endif
    2817 
    2818 /*
    2819 Following headers are used in this CONFIGURATION section only, so feel free to
    2820 remove them if not needed.
    2821 */
    2822 #include <cassert> // for assert
    2823 #include <algorithm> // for min, max
    2824 #include <mutex> // for std::mutex
    2825 #include <atomic> // for std::atomic
    2826 
    2827 #ifndef VMA_NULL
    2828  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    2829  #define VMA_NULL nullptr
    2830 #endif
    2831 
    2832 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    2833 #include <cstdlib>
    2834 void *aligned_alloc(size_t alignment, size_t size)
    2835 {
    2836  // alignment must be >= sizeof(void*)
    2837  if(alignment < sizeof(void*))
    2838  {
    2839  alignment = sizeof(void*);
    2840  }
    2841 
    2842  return memalign(alignment, size);
    2843 }
    2844 #elif defined(__APPLE__) || defined(__ANDROID__)
    2845 #include <cstdlib>
    2846 void *aligned_alloc(size_t alignment, size_t size)
    2847 {
    2848  // alignment must be >= sizeof(void*)
    2849  if(alignment < sizeof(void*))
    2850  {
    2851  alignment = sizeof(void*);
    2852  }
    2853 
    2854  void *pointer;
    2855  if(posix_memalign(&pointer, alignment, size) == 0)
    2856  return pointer;
    2857  return VMA_NULL;
    2858 }
    2859 #endif
    2860 
    2861 // If your compiler is not compatible with C++11 and definition of
    2862 // aligned_alloc() function is missing, uncommeting following line may help:
    2863 
    2864 //#include <malloc.h>
    2865 
    2866 // Normal assert to check for programmer's errors, especially in Debug configuration.
    2867 #ifndef VMA_ASSERT
    2868  #ifdef _DEBUG
    2869  #define VMA_ASSERT(expr) assert(expr)
    2870  #else
    2871  #define VMA_ASSERT(expr)
    2872  #endif
    2873 #endif
    2874 
    2875 // Assert that will be called very often, like inside data structures e.g. operator[].
    2876 // Making it non-empty can make program slow.
    2877 #ifndef VMA_HEAVY_ASSERT
    2878  #ifdef _DEBUG
    2879  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    2880  #else
    2881  #define VMA_HEAVY_ASSERT(expr)
    2882  #endif
    2883 #endif
    2884 
    2885 #ifndef VMA_ALIGN_OF
    2886  #define VMA_ALIGN_OF(type) (__alignof(type))
    2887 #endif
    2888 
    2889 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    2890  #if defined(_WIN32)
    2891  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    2892  #else
    2893  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    2894  #endif
    2895 #endif
    2896 
    2897 #ifndef VMA_SYSTEM_FREE
    2898  #if defined(_WIN32)
    2899  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    2900  #else
    2901  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    2902  #endif
    2903 #endif
    2904 
    2905 #ifndef VMA_MIN
    2906  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    2907 #endif
    2908 
    2909 #ifndef VMA_MAX
    2910  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    2911 #endif
    2912 
    2913 #ifndef VMA_SWAP
    2914  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    2915 #endif
    2916 
    2917 #ifndef VMA_SORT
    2918  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    2919 #endif
    2920 
    2921 #ifndef VMA_DEBUG_LOG
    2922  #define VMA_DEBUG_LOG(format, ...)
    2923  /*
    2924  #define VMA_DEBUG_LOG(format, ...) do { \
    2925  printf(format, __VA_ARGS__); \
    2926  printf("\n"); \
    2927  } while(false)
    2928  */
    2929 #endif
    2930 
    2931 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    2932 #if VMA_STATS_STRING_ENABLED
    2933  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    2934  {
    2935  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    2936  }
    2937  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    2938  {
    2939  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    2940  }
    2941  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    2942  {
    2943  snprintf(outStr, strLen, "%p", ptr);
    2944  }
    2945 #endif
    2946 
    2947 #ifndef VMA_MUTEX
    2948  class VmaMutex
    2949  {
    2950  public:
    2951  VmaMutex() { }
    2952  ~VmaMutex() { }
    2953  void Lock() { m_Mutex.lock(); }
    2954  void Unlock() { m_Mutex.unlock(); }
    2955  private:
    2956  std::mutex m_Mutex;
    2957  };
    2958  #define VMA_MUTEX VmaMutex
    2959 #endif
    2960 
    2961 /*
    2962 If providing your own implementation, you need to implement a subset of std::atomic:
    2963 
    2964 - Constructor(uint32_t desired)
    2965 - uint32_t load() const
    2966 - void store(uint32_t desired)
    2967 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    2968 */
    2969 #ifndef VMA_ATOMIC_UINT32
    2970  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    2971 #endif
    2972 
    2973 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    2974 
    2978  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    2979 #endif
    2980 
    2981 #ifndef VMA_DEBUG_ALIGNMENT
    2982 
    2986  #define VMA_DEBUG_ALIGNMENT (1)
    2987 #endif
    2988 
    2989 #ifndef VMA_DEBUG_MARGIN
    2990 
    2994  #define VMA_DEBUG_MARGIN (0)
    2995 #endif
    2996 
    2997 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    2998 
    3002  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3003 #endif
    3004 
    3005 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3006 
    3011  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3012 #endif
    3013 
    3014 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3015 
    3019  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3020 #endif
    3021 
    3022 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3023 
    3027  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3028 #endif
    3029 
    3030 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3031  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3033 #endif
    3034 
    3035 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3036  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3038 #endif
    3039 
    3040 #ifndef VMA_CLASS_NO_COPY
    3041  #define VMA_CLASS_NO_COPY(className) \
    3042  private: \
    3043  className(const className&) = delete; \
    3044  className& operator=(const className&) = delete;
    3045 #endif
    3046 
    3047 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3048 
    3049 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3050 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3051 
    3052 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3053 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3054 
    3055 /*******************************************************************************
    3056 END OF CONFIGURATION
    3057 */
    3058 
    3059 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3060  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3061 
    3062 // Returns number of bits set to 1 in (v).
    3063 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3064 {
    3065  uint32_t c = v - ((v >> 1) & 0x55555555);
    3066  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3067  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3068  c = ((c >> 8) + c) & 0x00FF00FF;
    3069  c = ((c >> 16) + c) & 0x0000FFFF;
    3070  return c;
    3071 }
    3072 
    3073 // Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
    3074 // Use types like uint32_t, uint64_t as T.
    3075 template <typename T>
    3076 static inline T VmaAlignUp(T val, T align)
    3077 {
    3078  return (val + align - 1) / align * align;
    3079 }
    3080 // Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
    3081 // Use types like uint32_t, uint64_t as T.
    3082 template <typename T>
    3083 static inline T VmaAlignDown(T val, T align)
    3084 {
    3085  return val / align * align;
    3086 }
    3087 
    3088 // Division with mathematical rounding to nearest number.
    3089 template <typename T>
    3090 static inline T VmaRoundDiv(T x, T y)
    3091 {
    3092  return (x + (y / (T)2)) / y;
    3093 }
    3094 
    3095 /*
    3096 Returns true if given number is a power of two.
    3097 T must be unsigned integer number or signed integer but always nonnegative.
    3098 For 0 returns true.
    3099 */
    3100 template <typename T>
    3101 inline bool VmaIsPow2(T x)
    3102 {
    3103  return (x & (x-1)) == 0;
    3104 }
    3105 
    3106 // Returns smallest power of 2 greater or equal to v.
    3107 static inline uint32_t VmaNextPow2(uint32_t v)
    3108 {
    3109  v--;
    3110  v |= v >> 1;
    3111  v |= v >> 2;
    3112  v |= v >> 4;
    3113  v |= v >> 8;
    3114  v |= v >> 16;
    3115  v++;
    3116  return v;
    3117 }
    3118 static inline uint64_t VmaNextPow2(uint64_t v)
    3119 {
    3120  v--;
    3121  v |= v >> 1;
    3122  v |= v >> 2;
    3123  v |= v >> 4;
    3124  v |= v >> 8;
    3125  v |= v >> 16;
    3126  v |= v >> 32;
    3127  v++;
    3128  return v;
    3129 }
    3130 
    3131 // Returns largest power of 2 less or equal to v.
    3132 static inline uint32_t VmaPrevPow2(uint32_t v)
    3133 {
    3134  v |= v >> 1;
    3135  v |= v >> 2;
    3136  v |= v >> 4;
    3137  v |= v >> 8;
    3138  v |= v >> 16;
    3139  v = v ^ (v >> 1);
    3140  return v;
    3141 }
    3142 static inline uint64_t VmaPrevPow2(uint64_t v)
    3143 {
    3144  v |= v >> 1;
    3145  v |= v >> 2;
    3146  v |= v >> 4;
    3147  v |= v >> 8;
    3148  v |= v >> 16;
    3149  v |= v >> 32;
    3150  v = v ^ (v >> 1);
    3151  return v;
    3152 }
    3153 
    3154 static inline bool VmaStrIsEmpty(const char* pStr)
    3155 {
    3156  return pStr == VMA_NULL || *pStr == '\0';
    3157 }
    3158 
    3159 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3160 {
    3161  switch(algorithm)
    3162  {
    3164  return "Linear";
    3166  return "Buddy";
    3167  case 0:
    3168  return "Default";
    3169  default:
    3170  VMA_ASSERT(0);
    3171  return "";
    3172  }
    3173 }
    3174 
    3175 #ifndef VMA_SORT
    3176 
    3177 template<typename Iterator, typename Compare>
    3178 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3179 {
    3180  Iterator centerValue = end; --centerValue;
    3181  Iterator insertIndex = beg;
    3182  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3183  {
    3184  if(cmp(*memTypeIndex, *centerValue))
    3185  {
    3186  if(insertIndex != memTypeIndex)
    3187  {
    3188  VMA_SWAP(*memTypeIndex, *insertIndex);
    3189  }
    3190  ++insertIndex;
    3191  }
    3192  }
    3193  if(insertIndex != centerValue)
    3194  {
    3195  VMA_SWAP(*insertIndex, *centerValue);
    3196  }
    3197  return insertIndex;
    3198 }
    3199 
    3200 template<typename Iterator, typename Compare>
    3201 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3202 {
    3203  if(beg < end)
    3204  {
    3205  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3206  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3207  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3208  }
    3209 }
    3210 
    3211 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3212 
    3213 #endif // #ifndef VMA_SORT
    3214 
    3215 /*
    3216 Returns true if two memory blocks occupy overlapping pages.
    3217 ResourceA must be in less memory offset than ResourceB.
    3218 
    3219 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3220 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3221 */
    3222 static inline bool VmaBlocksOnSamePage(
    3223  VkDeviceSize resourceAOffset,
    3224  VkDeviceSize resourceASize,
    3225  VkDeviceSize resourceBOffset,
    3226  VkDeviceSize pageSize)
    3227 {
    3228  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3229  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3230  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3231  VkDeviceSize resourceBStart = resourceBOffset;
    3232  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3233  return resourceAEndPage == resourceBStartPage;
    3234 }
    3235 
    3236 enum VmaSuballocationType
    3237 {
    3238  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3239  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3240  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3241  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3242  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3243  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3244  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3245 };
    3246 
    3247 /*
    3248 Returns true if given suballocation types could conflict and must respect
    3249 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
    3250 or linear image and another one is optimal image. If type is unknown, behave
    3251 conservatively.
    3252 */
    3253 static inline bool VmaIsBufferImageGranularityConflict(
    3254  VmaSuballocationType suballocType1,
    3255  VmaSuballocationType suballocType2)
    3256 {
    3257  if(suballocType1 > suballocType2)
    3258  {
    3259  VMA_SWAP(suballocType1, suballocType2);
    3260  }
    3261 
    3262  switch(suballocType1)
    3263  {
    3264  case VMA_SUBALLOCATION_TYPE_FREE:
    3265  return false;
    3266  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3267  return true;
    3268  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3269  return
    3270  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3271  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3272  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3273  return
    3274  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3275  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3276  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3277  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3278  return
    3279  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3280  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3281  return false;
    3282  default:
    3283  VMA_ASSERT(0);
    3284  return true;
    3285  }
    3286 }
    3287 
    3288 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3289 {
    3290  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3291  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3292  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3293  {
    3294  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3295  }
    3296 }
    3297 
    3298 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3299 {
    3300  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3301  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3302  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3303  {
    3304  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3305  {
    3306  return false;
    3307  }
    3308  }
    3309  return true;
    3310 }
    3311 
    3312 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3313 struct VmaMutexLock
    3314 {
    3315  VMA_CLASS_NO_COPY(VmaMutexLock)
    3316 public:
    3317  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
    3318  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3319  {
    3320  if(m_pMutex)
    3321  {
    3322  m_pMutex->Lock();
    3323  }
    3324  }
    3325 
    3326  ~VmaMutexLock()
    3327  {
    3328  if(m_pMutex)
    3329  {
    3330  m_pMutex->Unlock();
    3331  }
    3332  }
    3333 
    3334 private:
    3335  VMA_MUTEX* m_pMutex;
    3336 };
    3337 
    3338 #if VMA_DEBUG_GLOBAL_MUTEX
    3339  static VMA_MUTEX gDebugGlobalMutex;
    3340  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3341 #else
    3342  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3343 #endif
    3344 
    3345 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3346 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3347 
    3348 /*
    3349 Performs binary search and returns iterator to first element that is greater or
    3350 equal to (key), according to comparison (cmp).
    3351 
    3352 Cmp should return true if first argument is less than second argument.
    3353 
    3354 Returned value is the found element, if present in the collection or place where
    3355 new element with value (key) should be inserted.
    3356 */
    3357 template <typename CmpLess, typename IterT, typename KeyT>
    3358 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3359 {
    3360  size_t down = 0, up = (end - beg);
    3361  while(down < up)
    3362  {
    3363  const size_t mid = (down + up) / 2;
    3364  if(cmp(*(beg+mid), key))
    3365  {
    3366  down = mid + 1;
    3367  }
    3368  else
    3369  {
    3370  up = mid;
    3371  }
    3372  }
    3373  return beg + down;
    3374 }
    3375 
    3377 // Memory allocation
    3378 
    3379 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3380 {
    3381  if((pAllocationCallbacks != VMA_NULL) &&
    3382  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3383  {
    3384  return (*pAllocationCallbacks->pfnAllocation)(
    3385  pAllocationCallbacks->pUserData,
    3386  size,
    3387  alignment,
    3388  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3389  }
    3390  else
    3391  {
    3392  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3393  }
    3394 }
    3395 
    3396 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3397 {
    3398  if((pAllocationCallbacks != VMA_NULL) &&
    3399  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3400  {
    3401  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3402  }
    3403  else
    3404  {
    3405  VMA_SYSTEM_FREE(ptr);
    3406  }
    3407 }
    3408 
    3409 template<typename T>
    3410 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3411 {
    3412  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3413 }
    3414 
    3415 template<typename T>
    3416 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3417 {
    3418  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3419 }
    3420 
// Allocate + placement-new construct a single object of `type`.
// Pair with vma_delete.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

// Allocate storage for `count` objects of `type`. Pair with vma_delete_array.
// NOTE(review): the placement-new here constructs only the first element —
// remaining elements are left uninitialized; callers appear to rely on T
// being POD/trivially constructible. Confirm before using with non-POD types.
#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3424 
    3425 template<typename T>
    3426 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3427 {
    3428  ptr->~T();
    3429  VmaFree(pAllocationCallbacks, ptr);
    3430 }
    3431 
    3432 template<typename T>
    3433 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3434 {
    3435  if(ptr != VMA_NULL)
    3436  {
    3437  for(size_t i = count; i--; )
    3438  {
    3439  ptr[i].~T();
    3440  }
    3441  VmaFree(pAllocationCallbacks, ptr);
    3442  }
    3443 }
    3444 
// STL-compatible allocator.
// Forwards every allocation/deallocation to the VkAllocationCallbacks pointer
// it was constructed with (which may be null — VmaMalloc/VmaFree then fall
// back to the library defaults).
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks; // Not owned; may be null.
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    // Rebinding copy constructor required by the STL allocator concept.
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    // Two allocators compare equal iff they use the same callbacks object.
    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    // Assignment is deleted because m_pCallbacks is const.
    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};
    3472 
    3473 #if VMA_USE_STL_VECTOR
    3474 
    3475 #define VmaVector std::vector
    3476 
    3477 template<typename T, typename allocatorT>
    3478 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3479 {
    3480  vec.insert(vec.begin() + index, item);
    3481 }
    3482 
    3483 template<typename T, typename allocatorT>
    3484 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3485 {
    3486  vec.erase(vec.begin() + index);
    3487 }
    3488 
    3489 #else // #if VMA_USE_STL_VECTOR
    3490 
    3491 /* Class with interface compatible with subset of std::vector.
    3492 T must be POD because constructors and destructors are not called and memcpy is
    3493 used for these objects. */
    3494 template<typename T, typename AllocatorT>
    3495 class VmaVector
    3496 {
    3497 public:
    3498  typedef T value_type;
    3499 
    3500  VmaVector(const AllocatorT& allocator) :
    3501  m_Allocator(allocator),
    3502  m_pArray(VMA_NULL),
    3503  m_Count(0),
    3504  m_Capacity(0)
    3505  {
    3506  }
    3507 
    3508  VmaVector(size_t count, const AllocatorT& allocator) :
    3509  m_Allocator(allocator),
    3510  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3511  m_Count(count),
    3512  m_Capacity(count)
    3513  {
    3514  }
    3515 
    3516  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3517  m_Allocator(src.m_Allocator),
    3518  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3519  m_Count(src.m_Count),
    3520  m_Capacity(src.m_Count)
    3521  {
    3522  if(m_Count != 0)
    3523  {
    3524  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3525  }
    3526  }
    3527 
    3528  ~VmaVector()
    3529  {
    3530  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3531  }
    3532 
    3533  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3534  {
    3535  if(&rhs != this)
    3536  {
    3537  resize(rhs.m_Count);
    3538  if(m_Count != 0)
    3539  {
    3540  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3541  }
    3542  }
    3543  return *this;
    3544  }
    3545 
    3546  bool empty() const { return m_Count == 0; }
    3547  size_t size() const { return m_Count; }
    3548  T* data() { return m_pArray; }
    3549  const T* data() const { return m_pArray; }
    3550 
    3551  T& operator[](size_t index)
    3552  {
    3553  VMA_HEAVY_ASSERT(index < m_Count);
    3554  return m_pArray[index];
    3555  }
    3556  const T& operator[](size_t index) const
    3557  {
    3558  VMA_HEAVY_ASSERT(index < m_Count);
    3559  return m_pArray[index];
    3560  }
    3561 
    3562  T& front()
    3563  {
    3564  VMA_HEAVY_ASSERT(m_Count > 0);
    3565  return m_pArray[0];
    3566  }
    3567  const T& front() const
    3568  {
    3569  VMA_HEAVY_ASSERT(m_Count > 0);
    3570  return m_pArray[0];
    3571  }
    3572  T& back()
    3573  {
    3574  VMA_HEAVY_ASSERT(m_Count > 0);
    3575  return m_pArray[m_Count - 1];
    3576  }
    3577  const T& back() const
    3578  {
    3579  VMA_HEAVY_ASSERT(m_Count > 0);
    3580  return m_pArray[m_Count - 1];
    3581  }
    3582 
    3583  void reserve(size_t newCapacity, bool freeMemory = false)
    3584  {
    3585  newCapacity = VMA_MAX(newCapacity, m_Count);
    3586 
    3587  if((newCapacity < m_Capacity) && !freeMemory)
    3588  {
    3589  newCapacity = m_Capacity;
    3590  }
    3591 
    3592  if(newCapacity != m_Capacity)
    3593  {
    3594  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    3595  if(m_Count != 0)
    3596  {
    3597  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    3598  }
    3599  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3600  m_Capacity = newCapacity;
    3601  m_pArray = newArray;
    3602  }
    3603  }
    3604 
    3605  void resize(size_t newCount, bool freeMemory = false)
    3606  {
    3607  size_t newCapacity = m_Capacity;
    3608  if(newCount > m_Capacity)
    3609  {
    3610  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    3611  }
    3612  else if(freeMemory)
    3613  {
    3614  newCapacity = newCount;
    3615  }
    3616 
    3617  if(newCapacity != m_Capacity)
    3618  {
    3619  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    3620  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    3621  if(elementsToCopy != 0)
    3622  {
    3623  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    3624  }
    3625  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3626  m_Capacity = newCapacity;
    3627  m_pArray = newArray;
    3628  }
    3629 
    3630  m_Count = newCount;
    3631  }
    3632 
    3633  void clear(bool freeMemory = false)
    3634  {
    3635  resize(0, freeMemory);
    3636  }
    3637 
    3638  void insert(size_t index, const T& src)
    3639  {
    3640  VMA_HEAVY_ASSERT(index <= m_Count);
    3641  const size_t oldCount = size();
    3642  resize(oldCount + 1);
    3643  if(index < oldCount)
    3644  {
    3645  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    3646  }
    3647  m_pArray[index] = src;
    3648  }
    3649 
    3650  void remove(size_t index)
    3651  {
    3652  VMA_HEAVY_ASSERT(index < m_Count);
    3653  const size_t oldCount = size();
    3654  if(index < oldCount - 1)
    3655  {
    3656  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    3657  }
    3658  resize(oldCount - 1);
    3659  }
    3660 
    3661  void push_back(const T& src)
    3662  {
    3663  const size_t newIndex = size();
    3664  resize(newIndex + 1);
    3665  m_pArray[newIndex] = src;
    3666  }
    3667 
    3668  void pop_back()
    3669  {
    3670  VMA_HEAVY_ASSERT(m_Count > 0);
    3671  resize(size() - 1);
    3672  }
    3673 
    3674  void push_front(const T& src)
    3675  {
    3676  insert(0, src);
    3677  }
    3678 
    3679  void pop_front()
    3680  {
    3681  VMA_HEAVY_ASSERT(m_Count > 0);
    3682  remove(0);
    3683  }
    3684 
    3685  typedef T* iterator;
    3686 
    3687  iterator begin() { return m_pArray; }
    3688  iterator end() { return m_pArray + m_Count; }
    3689 
    3690 private:
    3691  AllocatorT m_Allocator;
    3692  T* m_pArray;
    3693  size_t m_Count;
    3694  size_t m_Capacity;
    3695 };
    3696 
// Free-function insert adapter: gives VmaVector the same call syntax as the
// std::vector overload used when VMA_USE_STL_VECTOR is enabled.
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
    3702 
// Free-function erase adapter: gives VmaVector the same call syntax as the
// std::vector overload used when VMA_USE_STL_VECTOR is enabled.
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
    3708 
    3709 #endif // #if VMA_USE_STL_VECTOR
    3710 
    3711 template<typename CmpLess, typename VectorT>
    3712 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    3713 {
    3714  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    3715  vector.data(),
    3716  vector.data() + vector.size(),
    3717  value,
    3718  CmpLess()) - vector.data();
    3719  VmaVectorInsert(vector, indexToInsert, value);
    3720  return indexToInsert;
    3721 }
    3722 
    3723 template<typename CmpLess, typename VectorT>
    3724 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    3725 {
    3726  CmpLess comparator;
    3727  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    3728  vector.begin(),
    3729  vector.end(),
    3730  value,
    3731  comparator);
    3732  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    3733  {
    3734  size_t indexToRemove = it - vector.begin();
    3735  VmaVectorRemove(vector, indexToRemove);
    3736  return true;
    3737  }
    3738  return false;
    3739 }
    3740 
    3741 template<typename CmpLess, typename IterT, typename KeyT>
    3742 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    3743 {
    3744  CmpLess comparator;
    3745  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    3746  beg, end, value, comparator);
    3747  if(it == end ||
    3748  (!comparator(*it, value) && !comparator(value, *it)))
    3749  {
    3750  return it;
    3751  }
    3752  return end;
    3753 }
    3754 
    3756 // class VmaPoolAllocator
    3757 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Destroys all blocks; outstanding pointers from Alloc become invalid.
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    // A slot either holds a live T or, while free, the index of the next free
    // slot within the same block (intrusive singly-linked free list).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    // One contiguous array of Items plus the head index of its free list
    // (UINT32_MAX means the block is full).
    struct ItemBlock
    {
        Item* pItems;
        uint32_t FirstFreeIndex;
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks; // Not owned.
    size_t m_ItemsPerBlock; // Capacity of every block; fixed at construction.
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
    3793 
// Constructor: stores the callbacks and block size; no blocks are allocated
// until the first Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    // A block must be able to hold at least one item.
    VMA_ASSERT(itemsPerBlock > 0);
}
    3802 
// Destructor: releases every block (and therefore every item still allocated).
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
    3808 
    3809 template<typename T>
    3810 void VmaPoolAllocator<T>::Clear()
    3811 {
    3812  for(size_t i = m_ItemBlocks.size(); i--; )
    3813  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
    3814  m_ItemBlocks.clear();
    3815 }
    3816 
    3817 template<typename T>
    3818 T* VmaPoolAllocator<T>::Alloc()
    3819 {
    3820  for(size_t i = m_ItemBlocks.size(); i--; )
    3821  {
    3822  ItemBlock& block = m_ItemBlocks[i];
    3823  // This block has some free items: Use first one.
    3824  if(block.FirstFreeIndex != UINT32_MAX)
    3825  {
    3826  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    3827  block.FirstFreeIndex = pItem->NextFreeIndex;
    3828  return &pItem->Value;
    3829  }
    3830  }
    3831 
    3832  // No block has free item: Create new one and use it.
    3833  ItemBlock& newBlock = CreateNewBlock();
    3834  Item* const pItem = &newBlock.pItems[0];
    3835  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    3836  return &pItem->Value;
    3837 }
    3838 
    3839 template<typename T>
    3840 void VmaPoolAllocator<T>::Free(T* ptr)
    3841 {
    3842  // Search all memory blocks to find ptr.
    3843  for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    3844  {
    3845  ItemBlock& block = m_ItemBlocks[i];
    3846 
    3847  // Casting to union.
    3848  Item* pItemPtr;
    3849  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    3850 
    3851  // Check if pItemPtr is in address range of this block.
    3852  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
    3853  {
    3854  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    3855  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    3856  block.FirstFreeIndex = index;
    3857  return;
    3858  }
    3859  }
    3860  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    3861 }
    3862 
    3863 template<typename T>
    3864 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    3865 {
    3866  ItemBlock newBlock = {
    3867  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
    3868 
    3869  m_ItemBlocks.push_back(newBlock);
    3870 
    3871  // Setup singly-linked list of all free items in this block.
    3872  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
    3873  newBlock.pItems[i].NextFreeIndex = i + 1;
    3874  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
    3875  return m_ItemBlocks.back();
    3876 }
    3877 
    3879 // class VmaRawList, VmaList
    3880 
    3881 #if VMA_USE_STL_LIST
    3882 
    3883 #define VmaList std::list
    3884 
    3885 #else // #if VMA_USE_STL_LIST
    3886 
// Node of VmaRawList: doubly-linked list pointers plus the stored payload.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev; // Null for the front node.
    VmaListItem* pNext; // Null for the back node.
    T Value;
};
    3894 
// Doubly linked list.
// Nodes are obtained from an internal VmaPoolAllocator, so pushes/pops do not
// hit the system heap on every operation.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    // NOTE(review): Front()/Back() do not check emptiness; after the list is
    // emptied via PopBack/PopFront the opposite end pointer is not reset and
    // may be stale — callers must check IsEmpty() first.
    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    ItemType* PushBack();
    ItemType* PushFront();
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks; // Not owned.
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Node storage pool.
    ItemType* m_pFront;
    ItemType* m_pBack;
    size_t m_Count;
};
    3939 
// Constructor: empty list; the node pool allocates 128 items per block.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
    3949 
// Destructor: the pool allocator's own destructor releases all node storage.
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
}
    3956 
    3957 template<typename T>
    3958 void VmaRawList<T>::Clear()
    3959 {
    3960  if(IsEmpty() == false)
    3961  {
    3962  ItemType* pItem = m_pBack;
    3963  while(pItem != VMA_NULL)
    3964  {
    3965  ItemType* const pPrevItem = pItem->pPrev;
    3966  m_ItemAllocator.Free(pItem);
    3967  pItem = pPrevItem;
    3968  }
    3969  m_pFront = VMA_NULL;
    3970  m_pBack = VMA_NULL;
    3971  m_Count = 0;
    3972  }
    3973 }
    3974 
    3975 template<typename T>
    3976 VmaListItem<T>* VmaRawList<T>::PushBack()
    3977 {
    3978  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    3979  pNewItem->pNext = VMA_NULL;
    3980  if(IsEmpty())
    3981  {
    3982  pNewItem->pPrev = VMA_NULL;
    3983  m_pFront = pNewItem;
    3984  m_pBack = pNewItem;
    3985  m_Count = 1;
    3986  }
    3987  else
    3988  {
    3989  pNewItem->pPrev = m_pBack;
    3990  m_pBack->pNext = pNewItem;
    3991  m_pBack = pNewItem;
    3992  ++m_Count;
    3993  }
    3994  return pNewItem;
    3995 }
    3996 
    3997 template<typename T>
    3998 VmaListItem<T>* VmaRawList<T>::PushFront()
    3999 {
    4000  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4001  pNewItem->pPrev = VMA_NULL;
    4002  if(IsEmpty())
    4003  {
    4004  pNewItem->pNext = VMA_NULL;
    4005  m_pFront = pNewItem;
    4006  m_pBack = pNewItem;
    4007  m_Count = 1;
    4008  }
    4009  else
    4010  {
    4011  pNewItem->pNext = m_pFront;
    4012  m_pFront->pPrev = pNewItem;
    4013  m_pFront = pNewItem;
    4014  ++m_Count;
    4015  }
    4016  return pNewItem;
    4017 }
    4018 
    4019 template<typename T>
    4020 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4021 {
    4022  ItemType* const pNewItem = PushBack();
    4023  pNewItem->Value = value;
    4024  return pNewItem;
    4025 }
    4026 
    4027 template<typename T>
    4028 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4029 {
    4030  ItemType* const pNewItem = PushFront();
    4031  pNewItem->Value = value;
    4032  return pNewItem;
    4033 }
    4034 
    4035 template<typename T>
    4036 void VmaRawList<T>::PopBack()
    4037 {
    4038  VMA_HEAVY_ASSERT(m_Count > 0);
    4039  ItemType* const pBackItem = m_pBack;
    4040  ItemType* const pPrevItem = pBackItem->pPrev;
    4041  if(pPrevItem != VMA_NULL)
    4042  {
    4043  pPrevItem->pNext = VMA_NULL;
    4044  }
    4045  m_pBack = pPrevItem;
    4046  m_ItemAllocator.Free(pBackItem);
    4047  --m_Count;
    4048 }
    4049 
    4050 template<typename T>
    4051 void VmaRawList<T>::PopFront()
    4052 {
    4053  VMA_HEAVY_ASSERT(m_Count > 0);
    4054  ItemType* const pFrontItem = m_pFront;
    4055  ItemType* const pNextItem = pFrontItem->pNext;
    4056  if(pNextItem != VMA_NULL)
    4057  {
    4058  pNextItem->pPrev = VMA_NULL;
    4059  }
    4060  m_pFront = pNextItem;
    4061  m_ItemAllocator.Free(pFrontItem);
    4062  --m_Count;
    4063 }
    4064 
    4065 template<typename T>
    4066 void VmaRawList<T>::Remove(ItemType* pItem)
    4067 {
    4068  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4069  VMA_HEAVY_ASSERT(m_Count > 0);
    4070 
    4071  if(pItem->pPrev != VMA_NULL)
    4072  {
    4073  pItem->pPrev->pNext = pItem->pNext;
    4074  }
    4075  else
    4076  {
    4077  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4078  m_pFront = pItem->pNext;
    4079  }
    4080 
    4081  if(pItem->pNext != VMA_NULL)
    4082  {
    4083  pItem->pNext->pPrev = pItem->pPrev;
    4084  }
    4085  else
    4086  {
    4087  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4088  m_pBack = pItem->pPrev;
    4089  }
    4090 
    4091  m_ItemAllocator.Free(pItem);
    4092  --m_Count;
    4093 }
    4094 
    4095 template<typename T>
    4096 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4097 {
    4098  if(pItem != VMA_NULL)
    4099  {
    4100  ItemType* const prevItem = pItem->pPrev;
    4101  ItemType* const newItem = m_ItemAllocator.Alloc();
    4102  newItem->pPrev = prevItem;
    4103  newItem->pNext = pItem;
    4104  pItem->pPrev = newItem;
    4105  if(prevItem != VMA_NULL)
    4106  {
    4107  prevItem->pNext = newItem;
    4108  }
    4109  else
    4110  {
    4111  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4112  m_pFront = newItem;
    4113  }
    4114  ++m_Count;
    4115  return newItem;
    4116  }
    4117  else
    4118  return PushBack();
    4119 }
    4120 
    4121 template<typename T>
    4122 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4123 {
    4124  if(pItem != VMA_NULL)
    4125  {
    4126  ItemType* const nextItem = pItem->pNext;
    4127  ItemType* const newItem = m_ItemAllocator.Alloc();
    4128  newItem->pNext = nextItem;
    4129  newItem->pPrev = pItem;
    4130  pItem->pNext = newItem;
    4131  if(nextItem != VMA_NULL)
    4132  {
    4133  nextItem->pPrev = newItem;
    4134  }
    4135  else
    4136  {
    4137  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4138  m_pBack = newItem;
    4139  }
    4140  ++m_Count;
    4141  return newItem;
    4142  }
    4143  else
    4144  return PushFront();
    4145 }
    4146 
    4147 template<typename T>
    4148 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4149 {
    4150  ItemType* const newItem = InsertBefore(pItem);
    4151  newItem->Value = value;
    4152  return newItem;
    4153 }
    4154 
    4155 template<typename T>
    4156 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4157 {
    4158  ItemType* const newItem = InsertAfter(pItem);
    4159  newItem->Value = value;
    4160  return newItem;
    4161 }
    4162 
    4163 template<typename T, typename AllocatorT>
    4164 class VmaList
    4165 {
    4166  VMA_CLASS_NO_COPY(VmaList)
    4167 public:
    4168  class iterator
    4169  {
    4170  public:
    4171  iterator() :
    4172  m_pList(VMA_NULL),
    4173  m_pItem(VMA_NULL)
    4174  {
    4175  }
    4176 
    4177  T& operator*() const
    4178  {
    4179  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4180  return m_pItem->Value;
    4181  }
    4182  T* operator->() const
    4183  {
    4184  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4185  return &m_pItem->Value;
    4186  }
    4187 
    4188  iterator& operator++()
    4189  {
    4190  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4191  m_pItem = m_pItem->pNext;
    4192  return *this;
    4193  }
    4194  iterator& operator--()
    4195  {
    4196  if(m_pItem != VMA_NULL)
    4197  {
    4198  m_pItem = m_pItem->pPrev;
    4199  }
    4200  else
    4201  {
    4202  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4203  m_pItem = m_pList->Back();
    4204  }
    4205  return *this;
    4206  }
    4207 
    4208  iterator operator++(int)
    4209  {
    4210  iterator result = *this;
    4211  ++*this;
    4212  return result;
    4213  }
    4214  iterator operator--(int)
    4215  {
    4216  iterator result = *this;
    4217  --*this;
    4218  return result;
    4219  }
    4220 
    4221  bool operator==(const iterator& rhs) const
    4222  {
    4223  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4224  return m_pItem == rhs.m_pItem;
    4225  }
    4226  bool operator!=(const iterator& rhs) const
    4227  {
    4228  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4229  return m_pItem != rhs.m_pItem;
    4230  }
    4231 
    4232  private:
    4233  VmaRawList<T>* m_pList;
    4234  VmaListItem<T>* m_pItem;
    4235 
    4236  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4237  m_pList(pList),
    4238  m_pItem(pItem)
    4239  {
    4240  }
    4241 
    4242  friend class VmaList<T, AllocatorT>;
    4243  };
    4244 
    4245  class const_iterator
    4246  {
    4247  public:
    4248  const_iterator() :
    4249  m_pList(VMA_NULL),
    4250  m_pItem(VMA_NULL)
    4251  {
    4252  }
    4253 
    4254  const_iterator(const iterator& src) :
    4255  m_pList(src.m_pList),
    4256  m_pItem(src.m_pItem)
    4257  {
    4258  }
    4259 
    4260  const T& operator*() const
    4261  {
    4262  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4263  return m_pItem->Value;
    4264  }
    4265  const T* operator->() const
    4266  {
    4267  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4268  return &m_pItem->Value;
    4269  }
    4270 
    4271  const_iterator& operator++()
    4272  {
    4273  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4274  m_pItem = m_pItem->pNext;
    4275  return *this;
    4276  }
    4277  const_iterator& operator--()
    4278  {
    4279  if(m_pItem != VMA_NULL)
    4280  {
    4281  m_pItem = m_pItem->pPrev;
    4282  }
    4283  else
    4284  {
    4285  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4286  m_pItem = m_pList->Back();
    4287  }
    4288  return *this;
    4289  }
    4290 
    4291  const_iterator operator++(int)
    4292  {
    4293  const_iterator result = *this;
    4294  ++*this;
    4295  return result;
    4296  }
    4297  const_iterator operator--(int)
    4298  {
    4299  const_iterator result = *this;
    4300  --*this;
    4301  return result;
    4302  }
    4303 
    4304  bool operator==(const const_iterator& rhs) const
    4305  {
    4306  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4307  return m_pItem == rhs.m_pItem;
    4308  }
    4309  bool operator!=(const const_iterator& rhs) const
    4310  {
    4311  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4312  return m_pItem != rhs.m_pItem;
    4313  }
    4314 
    4315  private:
    4316  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4317  m_pList(pList),
    4318  m_pItem(pItem)
    4319  {
    4320  }
    4321 
    4322  const VmaRawList<T>* m_pList;
    4323  const VmaListItem<T>* m_pItem;
    4324 
    4325  friend class VmaList<T, AllocatorT>;
    4326  };
    4327 
    4328  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4329 
    4330  bool empty() const { return m_RawList.IsEmpty(); }
    4331  size_t size() const { return m_RawList.GetCount(); }
    4332 
    4333  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4334  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4335 
    4336  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4337  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4338 
    4339  void clear() { m_RawList.Clear(); }
    4340  void push_back(const T& value) { m_RawList.PushBack(value); }
    4341  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4342  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4343 
    4344 private:
    4345  VmaRawList<T> m_RawList;
    4346 };
    4347 
    4348 #endif // #if VMA_USE_STL_LIST
    4349 
    4351 // class VmaMap
    4352 
    4353 // Unused in this version.
    4354 #if 0
    4355 
    4356 #if VMA_USE_STL_UNORDERED_MAP
    4357 
    4358 #define VmaPair std::pair
    4359 
    4360 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4361  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4362 
    4363 #else // #if VMA_USE_STL_UNORDERED_MAP
    4364 
// Minimal POD pair used by VmaMap when VMA_USE_STL_UNORDERED_MAP is disabled.
// (This whole section is currently compiled out via #if 0.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
    4374 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
*/
// Implemented as a VmaVector of pairs kept sorted by key; lookups use binary
// search. (Currently compiled out via #if 0.)
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator; // Raw pointer into the underlying vector.

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
    4397 
    4398 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4399 
// Comparator ordering VmaPairs by key only; the heterogeneous overload lets
// binary search compare a pair against a bare key.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
    4412 
// Inserts `pair` at its lower-bound position, keeping the vector sorted by key.
// Note: does not reject duplicate keys.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
    4423 
// Binary-searches for `key`; returns iterator to the matching pair, or end()
// when the key is absent.
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
    4441 
// Erases the pair at `it` (must be a valid iterator obtained from find/begin).
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
    4447 
    4448 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4449 
    4450 #endif // #if 0
    4451 
    4453 
    4454 class VmaDeviceMemoryBlock;
    4455 
// Kind of cache maintenance to perform on a memory range: flush or invalidate.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4457 
/*
Internal representation of a single VmaAllocation handle.

An allocation is in exactly one of the states listed in ALLOCATION_TYPE:
- NONE: freshly constructed, not yet initialized.
- BLOCK: a suballocation inside a VmaDeviceMemoryBlock (m_BlockAllocation active).
- DEDICATED: owns its own private VkDeviceMemory (m_DedicatedAllocation active).
The two state structs share storage in an anonymous union; m_Type selects
the active member.
*/
struct VmaAllocation_T
{
    VMA_CLASS_NO_COPY(VmaAllocation_T)
private:
    // High bit of m_MapCount: set when the allocation was created persistently
    // mapped. The low 7 bits count explicit map/unmap calls.
    static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;

    enum FLAGS
    {
        // When set, m_pUserData is a string owned by this allocation
        // (released via FreeUserDataString) rather than an opaque pointer.
        FLAG_USER_DATA_STRING = 0x01,
    };

public:
    enum ALLOCATION_TYPE
    {
        ALLOCATION_TYPE_NONE,
        ALLOCATION_TYPE_BLOCK,
        ALLOCATION_TYPE_DEDICATED,
    };

    // Constructs an uninitialized (ALLOCATION_TYPE_NONE) allocation.
    // One of the Init* methods below must be called before the allocation is used.
    VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
        m_Alignment(1),
        m_Size(0),
        m_pUserData(VMA_NULL),
        m_LastUseFrameIndex(currentFrameIndex),
        m_Type((uint8_t)ALLOCATION_TYPE_NONE),
        m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
        m_MapCount(0),
        m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
    {
#if VMA_STATS_STRING_ENABLED
        m_CreationFrameIndex = currentFrameIndex;
        m_BufferImageUsage = 0;
#endif
    }

    ~VmaAllocation_T()
    {
        // Only the persistent-map bit may remain; any other count means a
        // vmaMapMemory() call was not balanced by vmaUnmapMemory().
        VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");

        // Check if owned string was freed.
        VMA_ASSERT(m_pUserData == VMA_NULL);
    }

    // Transitions NONE -> BLOCK: this allocation becomes a suballocation of `block`.
    void InitBlockAllocation(
        VmaPool hPool,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset,
        VkDeviceSize alignment,
        VkDeviceSize size,
        VmaSuballocationType suballocationType,
        bool mapped,
        bool canBecomeLost)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(block != VMA_NULL);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_Alignment = alignment;
        m_Size = size;
        m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_SuballocationType = (uint8_t)suballocationType;
        m_BlockAllocation.m_hPool = hPool;
        m_BlockAllocation.m_Block = block;
        m_BlockAllocation.m_Offset = offset;
        m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    }

    // Transitions NONE -> BLOCK directly in the "lost" state: null block and
    // pool, zero offset. Requires m_LastUseFrameIndex to already equal
    // VMA_FRAME_INDEX_LOST.
    void InitLost()
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
        m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
        m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
        m_BlockAllocation.m_Block = VMA_NULL;
        m_BlockAllocation.m_Offset = 0;
        m_BlockAllocation.m_CanBecomeLost = true;
    }

    // Re-points a BLOCK allocation at a different block/offset
    // (used e.g. during defragmentation — see implementation).
    void ChangeBlockAllocation(
        VmaAllocator hAllocator,
        VmaDeviceMemoryBlock* block,
        VkDeviceSize offset);

    void ChangeSize(VkDeviceSize newSize);

    // pMappedData not null means allocation is created with MAPPED flag.
    // Transitions NONE -> DEDICATED: this allocation exclusively owns hMemory.
    void InitDedicatedAllocation(
        uint32_t memoryTypeIndex,
        VkDeviceMemory hMemory,
        VmaSuballocationType suballocationType,
        void* pMappedData,
        VkDeviceSize size)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
        VMA_ASSERT(hMemory != VK_NULL_HANDLE);
        m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
        m_Alignment = 0;
        m_Size = size;
        m_SuballocationType = (uint8_t)suballocationType;
        m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
        m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
        m_DedicatedAllocation.m_hMemory = hMemory;
        m_DedicatedAllocation.m_pMappedData = pMappedData;
    }

    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only while m_Type == ALLOCATION_TYPE_BLOCK.
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;

    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index; `expected` is updated on failure
    // (std::atomic compare_exchange_weak semantics).
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Fills outInfo with statistics describing this single dedicated allocation.
    void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
        outInfo.blockCount = 1;
        outInfo.allocationCount = 1;
        outInfo.unusedRangeCount = 0;
        outInfo.usedBytes = m_Size;
        outInfo.unusedBytes = 0;
        outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
        // No unused ranges exist: min keeps the "empty" sentinel, max stays 0.
        outInfo.unusedRangeSizeMin = UINT64_MAX;
        outInfo.unusedRangeSizeMax = 0;
    }

    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records the VkBufferUsageFlags/VkImageUsageFlags of the resource bound
    // to this allocation; may be set only once.
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    void* m_pUserData;
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Active member is selected by m_Type (BLOCK vs. DEDICATED).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    void FreeUserDataString(VmaAllocator hAllocator);
};
    4676 
/*
Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
allocated memory block or free.
*/
struct VmaSuballocation
{
    VkDeviceSize offset;       // Offset of the region from the beginning of the block.
    VkDeviceSize size;         // Size of the region in bytes.
    VmaAllocation hAllocation; // Allocation occupying the region, when assigned.
    VmaSuballocationType type; // Classification of the region's contents (see VmaSuballocationType).
};
    4688 
// Comparator for offsets.
// Orders suballocations by ascending offset.
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
// Orders suballocations by descending offset.
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
    4704 
// List of suballocations describing a memory block; used by the metadata classes below.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    4706 
    4707 // Cost of one additional allocation lost, as equivalent in bytes.
    4708 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    4709 
    4710 /*
    4711 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
    4712 
    4713 If canMakeOtherLost was false:
    4714 - item points to a FREE suballocation.
    4715 - itemsToMakeLostCount is 0.
    4716 
    4717 If canMakeOtherLost was true:
    4718 - item points to first of sequence of suballocations, which are either FREE,
    4719  or point to VmaAllocations that can become lost.
    4720 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    4721  the requested allocation to succeed.
    4722 */
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    void* customData; // Opaque; presumably interpreted by the metadata implementation that produced the request — verify at usage sites.

    // Heuristic cost of satisfying this request: bytes of allocations that
    // would be lost plus a fixed per-allocation penalty (VMA_LOST_ALLOCATION_COST).
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
    4737 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class; concrete algorithms are the VmaBlockMetadata_* classes below.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    // Always call right after construction, before any other method.
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* flags.
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation refuses; algorithms that support it override.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Shared helpers for subclass PrintDetailedMap() implementations.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
    4826 
// Helper for Validate() implementations: when `cond` is false, asserts and
// makes the enclosing function return false.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
    4831 
// Default metadata algorithm: keeps every suballocation (used and free) in a
// VmaSuballocationList, with an auxiliary vector of free suballocations
// sorted by size for fast lookup.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    // Total entries minus free entries = live allocations.
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    // Overrides the base default (which always returns false); see implementation.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

private:
    // Number of free suballocations in m_Suballocations.
    uint32_t m_FreeCount;
    // Sum of sizes of all free suballocations.
    VkDeviceSize m_SumFreeSize;
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    // Consistency check for m_FreeSuballocationsBySize.
    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
    4924 
    4925 /*
    4926 Allocations and their references in internal data structure look like this:
    4927 
    4928 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    4929 
    4930  0 +-------+
    4931  | |
    4932  | |
    4933  | |
    4934  +-------+
    4935  | Alloc | 1st[m_1stNullItemsBeginCount]
    4936  +-------+
    4937  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4938  +-------+
    4939  | ... |
    4940  +-------+
    4941  | Alloc | 1st[1st.size() - 1]
    4942  +-------+
    4943  | |
    4944  | |
    4945  | |
    4946 GetSize() +-------+
    4947 
    4948 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    4949 
    4950  0 +-------+
    4951  | Alloc | 2nd[0]
    4952  +-------+
    4953  | Alloc | 2nd[1]
    4954  +-------+
    4955  | ... |
    4956  +-------+
    4957  | Alloc | 2nd[2nd.size() - 1]
    4958  +-------+
    4959  | |
    4960  | |
    4961  | |
    4962  +-------+
    4963  | Alloc | 1st[m_1stNullItemsBeginCount]
    4964  +-------+
    4965  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4966  +-------+
    4967  | ... |
    4968  +-------+
    4969  | Alloc | 1st[1st.size() - 1]
    4970  +-------+
    4971  | |
    4972 GetSize() +-------+
    4973 
    4974 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    4975 
    4976  0 +-------+
    4977  | |
    4978  | |
    4979  | |
    4980  +-------+
    4981  | Alloc | 1st[m_1stNullItemsBeginCount]
    4982  +-------+
    4983  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    4984  +-------+
    4985  | ... |
    4986  +-------+
    4987  | Alloc | 1st[1st.size() - 1]
    4988  +-------+
    4989  | |
    4990  | |
    4991  | |
    4992  +-------+
    4993  | Alloc | 2nd[2nd.size() - 1]
    4994  +-------+
    4995  | ... |
    4996  +-------+
    4997  | Alloc | 2nd[1]
    4998  +-------+
    4999  | Alloc | 2nd[0]
    5000 GetSize() +-------+
    5001 
    5002 */
// Linear metadata algorithm (ring buffer / double stack). See the diagram in
// the comment above for the layout of the 1st/2nd vectors in each mode.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    // Physical storage of the two vectors; which one currently plays the role
    // of "1st" is selected by m_1stVectorIndex via the accessors below.
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    // Housekeeping of null items performed after a free (see implementation).
    void CleanupAfterFree();
};
    5101 
    5102 /*
    5103 - GetSize() is the original size of allocated memory block.
    5104 - m_UsableSize is this size aligned down to a power of two.
    5105  All allocations and calculations happen relative to m_UsableSize.
    5106 - GetUnusableSize() is the difference between them.
    5107  It is reported as separate, unused range, not available for allocations.
    5108 
    5109 Node at level 0 has size = m_UsableSize.
    5110 Each next level contains nodes with size 2 times smaller than current level.
    5111 m_LevelCount is the maximum number of levels to use in the current object.
    5112 */
// Buddy-allocator metadata: a binary tree of power-of-two-sized nodes over
// m_UsableSize (the block size aligned down to a power of two) — see the
// comment above for the level/size relationship.
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // The unusable tail beyond m_UsableSize is counted as free space here.
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    // Empty exactly when the root node is one big free range.
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by this algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    // Both public Free flavors funnel into the private FreeAtOffset(alloc, offset).
    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    // Minimum size of a single node, in bytes.
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    // Upper bound on tree depth; also sizes the per-level free-list array.
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled during ValidateNode() traversal, to compare against
    // the cached m_AllocationCount / m_FreeCount / m_SumFreeSize.
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    // Tree node; the union member in use is selected by `type`.
    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy; // Paired sibling node at the same level.

        union
        {
            // Links within the per-level free list (valid for TYPE_FREE).
            struct
            {
                Node* prev;
                Node* next;
            } free;
            // Valid for TYPE_ALLOCATION.
            struct
            {
                VmaAllocation alloc;
            } allocation;
            // Valid for TYPE_SPLIT.
            struct
            {
                Node* leftChild;
            } split;
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Doubly-linked free list of TYPE_FREE nodes, one list per level.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    // Node size halves with each level down from the root.
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
    5249 
    5250 /*
    5251 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5252 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5253 
    5254 Thread-safety: This class must be externally synchronized.
    5255 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Metadata describing the layout of suballocations inside this block.
    // Concrete type depends on the algorithm chosen in Init().
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        // Destroying a block that is still mapped or still owns VkDeviceMemory
        // indicates a missing Unmap()/Destroy() call.
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Reference-counted mapping: `count` is added to the internal map counter.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Used by corruption detection: write/verify magic values in the margins
    // around an allocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    // Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    // Also protects m_MapCount, m_pMappedData.
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount;
    void* m_pMappedData;
};
    5318 
    5319 struct VmaPointerLess
    5320 {
    5321  bool operator()(const void* lhs, const void* rhs) const
    5322  {
    5323  return lhs < rhs;
    5324  }
    5325 };
    5326 
    5327 class VmaDefragmentator;
    5328 
    5329 /*
    5330 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5331 Vulkan memory type.
    5332 
    5333 Synchronized internally with a mutex.
    5334 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Creates m_MinBlockCount blocks up front. Called after construction.
    VkResult CreateMinBlocks();

    // Simple accessors for the immutable configuration of this vector.
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    // Allocates a region from an existing block or creates a new block.
    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Lazily creates the defragmentator for this vector, or returns the
    // existing one after updating its frame index.
    VmaDefragmentator* EnsureDefragmentator(
        VmaAllocator hAllocator,
        uint32_t currentFrameIndex);

    VkResult Defragment(
        VmaDefragmentationStats* pDefragmentationStats,
        VkDeviceSize& maxBytesToMove,
        uint32_t& maxAllocationsToMove);

    void DestroyDefragmentator();

private:
    friend class VmaDefragmentator;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    bool m_HasEmptyBlock;
    // Protects all mutable state below.
    VMA_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    VmaDefragmentator* m_pDefragmentator;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
};
    5447 
// Implementation of the opaque VmaPool handle: a custom memory pool is just
// a VmaBlockVector with an identifier.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    uint32_t GetId() const { return m_Id; }
    // Id can be assigned only once, after creation.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
    5470 
// Moves allocations between blocks of a single VmaBlockVector in order to
// compact memory. Usage: AddAllocation() for each candidate, then Defragment().
class VmaDefragmentator
{
    VMA_CLASS_NO_COPY(VmaDefragmentator)
private:
    const VmaAllocator m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    uint32_t m_CurrentFrameIndex;
    // Statistics accumulated across DefragmentRound() calls.
    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        // Optional out-flag set to VK_TRUE when this allocation gets moved.
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
    };

    // Comparator: larger allocations first.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Used between AddAllocation and Defragment.
    VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

    // Per-block bookkeeping built up during Defragment().
    struct BlockInfo
    {
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_pBlock(VMA_NULL),
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks),
            m_pMappedDataForDefragmentation(VMA_NULL)
        {
        }

        // A block has non-movable allocations when not all of its allocations
        // were registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        // NOTE: "Descecnding" typo is kept - renaming would break callers.
        void SortAllocationsBySizeDescecnding()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        VkResult EnsureMapping(VmaAllocator hAllocator, void** ppMappedData);
        void Unmap(VmaAllocator hAllocator);

    private:
        // Not null if mapped for defragmentation only, not originally mapped.
        void* m_pMappedDataForDefragmentation;
    };

    // Comparators for locating a BlockInfo by its block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // Performs one pass of moving allocations, bounded by the given limits.
    VkResult DefragmentRound(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);

public:
    VmaDefragmentator(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex);

    ~VmaDefragmentator();

    VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

    // Registers an allocation as a candidate for moving.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);

    VkResult Defragment(
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);
};
    5600 
    5601 #if VMA_RECORDING_ENABLED
    5602 
// Records all VMA API calls into a CSV file for later replay/analysis.
// Only compiled in when VMA_RECORDING_ENABLED is set.
class VmaRecorder
{
public:
    VmaRecorder();
    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    // Writes a one-time header describing the device this trace was captured on.
    void WriteConfiguration(
        const VkPhysicalDeviceProperties& devProps,
        const VkPhysicalDeviceMemoryProperties& memProps,
        bool dedicatedAllocationExtensionEnabled);
    ~VmaRecorder();

    // One Record* method per public VMA entry point.
    void RecordCreateAllocator(uint32_t frameIndex);
    void RecordDestroyAllocator(uint32_t frameIndex);
    void RecordCreatePool(uint32_t frameIndex,
        const VmaPoolCreateInfo& createInfo,
        VmaPool pool);
    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    void RecordAllocateMemory(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordAllocateMemoryForImage(uint32_t frameIndex,
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        const VmaAllocationCreateInfo& createInfo,
        VmaAllocation allocation);
    void RecordFreeMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordResizeAllocation(
        uint32_t frameIndex,
        VmaAllocation allocation,
        VkDeviceSize newSize);
    void RecordSetAllocationUserData(uint32_t frameIndex,
        VmaAllocation allocation,
        const void* pUserData);
    void RecordCreateLostAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordUnmapMemory(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordFlushAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordInvalidateAllocation(uint32_t frameIndex,
        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    void RecordCreateBuffer(uint32_t frameIndex,
        const VkBufferCreateInfo& bufCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordCreateImage(uint32_t frameIndex,
        const VkImageCreateInfo& imageCreateInfo,
        const VmaAllocationCreateInfo& allocCreateInfo,
        VmaAllocation allocation);
    void RecordDestroyBuffer(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordDestroyImage(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordTouchAllocation(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordGetAllocationInfo(uint32_t frameIndex,
        VmaAllocation allocation);
    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
        VmaPool pool);

private:
    // Common data written with every recorded call.
    struct CallParams
    {
        uint32_t threadId;
        double time;
    };

    // Formats pUserData either as a string or as a pointer value,
    // depending on allocFlags.
    class UserDataString
    {
    public:
        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
        const char* GetString() const { return m_Str; }

    private:
        char m_PtrStr[17];
        const char* m_Str;
    };

    bool m_UseMutex;
    VmaRecordFlags m_Flags;
    FILE* m_File;
    // Serializes writes to m_File when m_UseMutex is set.
    VMA_MUTEX m_FileMutex;
    // Timer frequency and start value used to timestamp entries.
    int64_t m_Freq;
    int64_t m_StartCounter;

    void GetBasicParams(CallParams& outParams);
    void Flush();
};
    5702 
    5703 #endif // #if VMA_RECORDING_ENABLED
    5704 
    5705 // Main allocator object.
// Main allocator object.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if not limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user-provided callbacks, or null to use default CPU allocation.
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device limit, never smaller than the debug minimum.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must also respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult Defragment(
        VmaAllocation* pAllocations,
        size_t allocationCount,
        VkBool32* pAllocationsChanged,
        const VmaDefragmentationInfo* pDefragmentationInfo,
        VmaDefragmentationStats* pDefragmentationStats);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Wrappers over vkAllocateMemory/vkFreeMemory that also track heap limits
    // and invoke user callbacks.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
    5906 
    5908 // Memory allocation #2 after VmaAllocator_T definition
    5909 
    5910 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    5911 {
    5912  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    5913 }
    5914 
    5915 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    5916 {
    5917  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    5918 }
    5919 
    5920 template<typename T>
    5921 static T* VmaAllocate(VmaAllocator hAllocator)
    5922 {
    5923  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    5924 }
    5925 
    5926 template<typename T>
    5927 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    5928 {
    5929  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    5930 }
    5931 
    5932 template<typename T>
    5933 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    5934 {
    5935  if(ptr != VMA_NULL)
    5936  {
    5937  ptr->~T();
    5938  VmaFree(hAllocator, ptr);
    5939  }
    5940 }
    5941 
    5942 template<typename T>
    5943 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    5944 {
    5945  if(ptr != VMA_NULL)
    5946  {
    5947  for(size_t i = count; i--; )
    5948  ptr[i].~T();
    5949  VmaFree(hAllocator, ptr);
    5950  }
    5951 }
    5952 
    5954 // VmaStringBuilder
    5955 
    5956 #if VMA_STATS_STRING_ENABLED
    5957 
// Minimal growable character buffer used to build statistics strings.
// Unlike std::string, uses the allocator's host allocation callbacks.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // Note: the returned buffer is NOT null-terminated.
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
    5975 
    5976 void VmaStringBuilder::Add(const char* pStr)
    5977 {
    5978  const size_t strLen = strlen(pStr);
    5979  if(strLen > 0)
    5980  {
    5981  const size_t oldCount = m_Data.size();
    5982  m_Data.resize(oldCount + strLen);
    5983  memcpy(m_Data.data() + oldCount, pStr, strLen);
    5984  }
    5985 }
    5986 
    5987 void VmaStringBuilder::AddNumber(uint32_t num)
    5988 {
    5989  char buf[11];
    5990  VmaUint32ToStr(buf, sizeof(buf), num);
    5991  Add(buf);
    5992 }
    5993 
    5994 void VmaStringBuilder::AddNumber(uint64_t num)
    5995 {
    5996  char buf[21];
    5997  VmaUint64ToStr(buf, sizeof(buf), num);
    5998  Add(buf);
    5999 }
    6000 
    6001 void VmaStringBuilder::AddPointer(const void* ptr)
    6002 {
    6003  char buf[21];
    6004  VmaPtrToStr(buf, sizeof(buf), ptr);
    6005  Add(buf);
    6006 }
    6007 
    6008 #endif // #if VMA_STATS_STRING_ENABLED
    6009 
    6011 // VmaJsonWriter
    6012 
    6013 #if VMA_STATS_STRING_ENABLED
    6014 
// Streaming JSON writer used to produce the detailed statistics string.
// Maintains a stack of open objects/arrays so it can emit separators and
// indentation correctly; strings can be built incrementally between
// BeginString() and EndString().
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine suppresses per-element line breaks inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete string value (BeginString + EndString).
    void WriteString(const char* pStr);
    // Incremental string building: Begin, any number of Continue*, then End.
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // Indentation unit, repeated once per nesting level.
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently-open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        // Number of values emitted so far; used to decide comma placement.
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    // True between BeginString() and EndString().
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
    6063 
// Indentation unit emitted once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
    6065 
// The writer appends into the caller-owned string builder `sb`;
// pAllocationCallbacks is used only for the internal collection stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
    6072 
VmaJsonWriter::~VmaJsonWriter()
{
    // All strings and collections must be closed before destruction,
    // otherwise the produced JSON is malformed.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
    6078 
    6079 void VmaJsonWriter::BeginObject(bool singleLine)
    6080 {
    6081  VMA_ASSERT(!m_InsideString);
    6082 
    6083  BeginValue(false);
    6084  m_SB.Add('{');
    6085 
    6086  StackItem item;
    6087  item.type = COLLECTION_TYPE_OBJECT;
    6088  item.valueCount = 0;
    6089  item.singleLineMode = singleLine;
    6090  m_Stack.push_back(item);
    6091 }
    6092 
    6093 void VmaJsonWriter::EndObject()
    6094 {
    6095  VMA_ASSERT(!m_InsideString);
    6096 
    6097  WriteIndent(true);
    6098  m_SB.Add('}');
    6099 
    6100  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    6101  m_Stack.pop_back();
    6102 }
    6103 
    6104 void VmaJsonWriter::BeginArray(bool singleLine)
    6105 {
    6106  VMA_ASSERT(!m_InsideString);
    6107 
    6108  BeginValue(false);
    6109  m_SB.Add('[');
    6110 
    6111  StackItem item;
    6112  item.type = COLLECTION_TYPE_ARRAY;
    6113  item.valueCount = 0;
    6114  item.singleLineMode = singleLine;
    6115  m_Stack.push_back(item);
    6116 }
    6117 
    6118 void VmaJsonWriter::EndArray()
    6119 {
    6120  VMA_ASSERT(!m_InsideString);
    6121 
    6122  WriteIndent(true);
    6123  m_SB.Add(']');
    6124 
    6125  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    6126  m_Stack.pop_back();
    6127 }
    6128 
    6129 void VmaJsonWriter::WriteString(const char* pStr)
    6130 {
    6131  BeginString(pStr);
    6132  EndString();
    6133 }
    6134 
    6135 void VmaJsonWriter::BeginString(const char* pStr)
    6136 {
    6137  VMA_ASSERT(!m_InsideString);
    6138 
    6139  BeginValue(true);
    6140  m_SB.Add('"');
    6141  m_InsideString = true;
    6142  if(pStr != VMA_NULL && pStr[0] != '\0')
    6143  {
    6144  ContinueString(pStr);
    6145  }
    6146 }
    6147 
    6148 void VmaJsonWriter::ContinueString(const char* pStr)
    6149 {
    6150  VMA_ASSERT(m_InsideString);
    6151 
    6152  const size_t strLen = strlen(pStr);
    6153  for(size_t i = 0; i < strLen; ++i)
    6154  {
    6155  char ch = pStr[i];
    6156  if(ch == '\\')
    6157  {
    6158  m_SB.Add("\\\\");
    6159  }
    6160  else if(ch == '"')
    6161  {
    6162  m_SB.Add("\\\"");
    6163  }
    6164  else if(ch >= 32)
    6165  {
    6166  m_SB.Add(ch);
    6167  }
    6168  else switch(ch)
    6169  {
    6170  case '\b':
    6171  m_SB.Add("\\b");
    6172  break;
    6173  case '\f':
    6174  m_SB.Add("\\f");
    6175  break;
    6176  case '\n':
    6177  m_SB.Add("\\n");
    6178  break;
    6179  case '\r':
    6180  m_SB.Add("\\r");
    6181  break;
    6182  case '\t':
    6183  m_SB.Add("\\t");
    6184  break;
    6185  default:
    6186  VMA_ASSERT(0 && "Character not currently supported.");
    6187  break;
    6188  }
    6189  }
    6190 }
    6191 
// Appends an unsigned 32-bit number, formatted as text, to the string
// value currently being written.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends an unsigned 64-bit number, formatted as text, to the string
// value currently being written.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}

// Appends a pointer value, formatted as text, to the string value
// currently being written.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
    6209 
// Finishes the string value started with BeginString(): optionally appends
// pStr, writes the closing '"', and leaves "inside string" mode.
void VmaJsonWriter::EndString(const char* pStr)
{
    VMA_ASSERT(m_InsideString);
    if(pStr != VMA_NULL && pStr[0] != '\0')
    {
        ContinueString(pStr);
    }
    m_SB.Add('"');
    m_InsideString = false;
}
    6220 
// Writes an unsigned 32-bit number as a standalone JSON value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}

// Writes an unsigned 64-bit number as a standalone JSON value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
    6234 
    6235 void VmaJsonWriter::WriteBool(bool b)
    6236 {
    6237  VMA_ASSERT(!m_InsideString);
    6238  BeginValue(false);
    6239  m_SB.Add(b ? "true" : "false");
    6240 }
    6241 
// Writes the JSON literal "null" as a standalone value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
    6248 
    6249 void VmaJsonWriter::BeginValue(bool isString)
    6250 {
    6251  if(!m_Stack.empty())
    6252  {
    6253  StackItem& currItem = m_Stack.back();
    6254  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6255  currItem.valueCount % 2 == 0)
    6256  {
    6257  VMA_ASSERT(isString);
    6258  }
    6259 
    6260  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    6261  currItem.valueCount % 2 != 0)
    6262  {
    6263  m_SB.Add(": ");
    6264  }
    6265  else if(currItem.valueCount > 0)
    6266  {
    6267  m_SB.Add(", ");
    6268  WriteIndent();
    6269  }
    6270  else
    6271  {
    6272  WriteIndent();
    6273  }
    6274  ++currItem.valueCount;
    6275  }
    6276 }
    6277 
    6278 void VmaJsonWriter::WriteIndent(bool oneLess)
    6279 {
    6280  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    6281  {
    6282  m_SB.AddNewLine();
    6283 
    6284  size_t count = m_Stack.size();
    6285  if(count > 0 && oneLess)
    6286  {
    6287  --count;
    6288  }
    6289  for(size_t i = 0; i < count; ++i)
    6290  {
    6291  m_SB.Add(INDENT);
    6292  }
    6293  }
    6294 }
    6295 
    6296 #endif // #if VMA_STATS_STRING_ENABLED
    6297 
    6299 
    6300 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    6301 {
    6302  if(IsUserDataString())
    6303  {
    6304  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    6305 
    6306  FreeUserDataString(hAllocator);
    6307 
    6308  if(pUserData != VMA_NULL)
    6309  {
    6310  const char* const newStrSrc = (char*)pUserData;
    6311  const size_t newStrLen = strlen(newStrSrc);
    6312  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    6313  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    6314  m_pUserData = newStrDst;
    6315  }
    6316  }
    6317  else
    6318  {
    6319  m_pUserData = pUserData;
    6320  }
    6321 }
    6322 
// Rebinds this block-type allocation to a different device memory block at
// the given offset (e.g. during defragmentation), transferring any active
// mapping references from the old block to the new one.
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        // A persistently mapped allocation holds one extra map reference.
        if(IsPersistentMap())
            ++mapRefCount;
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
    6344 
// Updates the recorded size of this allocation (e.g. after an in-place
// resize). The new size must be non-zero.
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
    6350 
    6351 VkDeviceSize VmaAllocation_T::GetOffset() const
    6352 {
    6353  switch(m_Type)
    6354  {
    6355  case ALLOCATION_TYPE_BLOCK:
    6356  return m_BlockAllocation.m_Offset;
    6357  case ALLOCATION_TYPE_DEDICATED:
    6358  return 0;
    6359  default:
    6360  VMA_ASSERT(0);
    6361  return 0;
    6362  }
    6363 }
    6364 
    6365 VkDeviceMemory VmaAllocation_T::GetMemory() const
    6366 {
    6367  switch(m_Type)
    6368  {
    6369  case ALLOCATION_TYPE_BLOCK:
    6370  return m_BlockAllocation.m_Block->GetDeviceMemory();
    6371  case ALLOCATION_TYPE_DEDICATED:
    6372  return m_DedicatedAllocation.m_hMemory;
    6373  default:
    6374  VMA_ASSERT(0);
    6375  return VK_NULL_HANDLE;
    6376  }
    6377 }
    6378 
    6379 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    6380 {
    6381  switch(m_Type)
    6382  {
    6383  case ALLOCATION_TYPE_BLOCK:
    6384  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    6385  case ALLOCATION_TYPE_DEDICATED:
    6386  return m_DedicatedAllocation.m_MemoryTypeIndex;
    6387  default:
    6388  VMA_ASSERT(0);
    6389  return UINT32_MAX;
    6390  }
    6391 }
    6392 
// Returns a CPU pointer to this allocation's memory if it is currently
// mapped, or VMA_NULL otherwise.
void* VmaAllocation_T::GetMappedData() const
{
    switch(m_Type)
    {
    case ALLOCATION_TYPE_BLOCK:
        if(m_MapCount != 0)
        {
            // The whole block is mapped; offset into it for this allocation.
            void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
            VMA_ASSERT(pBlockData != VMA_NULL);
            return (char*)pBlockData + m_BlockAllocation.m_Offset;
        }
        else
        {
            return VMA_NULL;
        }
        break;
    case ALLOCATION_TYPE_DEDICATED:
        // Cached pointer must be non-null exactly when the map count is non-zero.
        VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
        return m_DedicatedAllocation.m_pMappedData;
    default:
        VMA_ASSERT(0);
        return VMA_NULL;
    }
}
    6417 
    6418 bool VmaAllocation_T::CanBecomeLost() const
    6419 {
    6420  switch(m_Type)
    6421  {
    6422  case ALLOCATION_TYPE_BLOCK:
    6423  return m_BlockAllocation.m_CanBecomeLost;
    6424  case ALLOCATION_TYPE_DEDICATED:
    6425  return false;
    6426  default:
    6427  VMA_ASSERT(0);
    6428  return false;
    6429  }
    6430 }
    6431 
// Returns the custom pool this allocation came from.
// Valid only for block allocations.
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
    6437 
// Tries to atomically mark this allocation as lost. Returns true on
// success; returns false if the allocation was used within the last
// frameInUseCount frames (still potentially in use) or is already lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    // Compare-exchange loop: retried when the last-use frame index changes
    // concurrently between the read and the exchange.
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not have asked (CanBecomeLost()
            // implies a valid state).
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Used too recently - cannot be made lost yet.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
        }
    }
}
    6469 
    6470 #if VMA_STATS_STRING_ENABLED
    6471 
// Correspond to values of enum VmaSuballocationType.
// Indexed by the enum value when printing suballocation types into the
// JSON statistics dump - keep in sync with the enum.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
    6481 
// Writes this allocation's parameters as key/value pairs into an already
// open JSON object. The caller is responsible for BeginObject()/EndObject().
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned, null-terminated string - print verbatim.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
    6517 
    6518 #endif
    6519 
    6520 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    6521 {
    6522  VMA_ASSERT(IsUserDataString());
    6523  if(m_pUserData != VMA_NULL)
    6524  {
    6525  char* const oldStr = (char*)m_pUserData;
    6526  const size_t oldStrLen = strlen(oldStr);
    6527  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    6528  m_pUserData = VMA_NULL;
    6529  }
    6530 }
    6531 
// Increments the map reference counter of this block allocation. The
// counter saturates at 0x7F; the persistent-map flag bit is excluded from
// the check.
void VmaAllocation_T::BlockAllocMap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    {
        ++m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    }
}

// Decrements the map reference counter of this block allocation.
// Asserts on unbalanced unmap.
void VmaAllocation_T::BlockAllocUnmap()
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    }
}
    6559 
// Maps the dedicated VkDeviceMemory, or reuses the existing mapping if the
// allocation is already mapped (reference-counted). On success *ppData
// receives the mapped pointer. Returns the result of vkMapMemory, or
// VK_ERROR_MEMORY_MAP_FAILED when the reference counter would overflow.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped - bump the reference counter and return the cached
        // pointer.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        // First map: call into Vulkan and cache the pointer on success.
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
    6596 
// Decrements the map reference counter of this dedicated allocation and
// calls vkUnmapMemory when it reaches zero. Asserts on unbalanced unmap.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Last reference gone - drop the cached pointer and unmap.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
    6617 
    6618 #if VMA_STATS_STRING_ENABLED
    6619 
// Serializes a VmaStatInfo as one JSON object. The Min/Avg/Max sub-objects
// are emitted only when there is more than one sample, since with a single
// sample they would all equal that sample.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
    6667 
    6668 #endif // #if VMA_STATS_STRING_ENABLED
    6669 
// Comparator ordering free-suballocation iterators by ascending size.
// The second overload allows comparing directly against a raw size,
// e.g. for binary search, without constructing an iterator.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
    6685 
    6686 
    6688 // class VmaBlockMetadata
    6689 
// Base metadata for a single memory block. The size is assigned later via
// Init(); here only the allocator's allocation callbacks are captured.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
    6695 
    6696 #if VMA_STATS_STRING_ENABLED
    6697 
// Opens the JSON object describing one block and writes its totals, then
// opens the "Suballocations" array. Must be paired with
// PrintDetailedMap_End().
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
    6720 
// Writes one used suballocation as a single-line JSON object:
// its offset plus the allocation's own parameters.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    hAllocation->PrintParameters(json);

    json.EndObject();
}
    6734 
// Writes one free range as a single-line JSON object with offset,
// type "FREE", and size.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
    6752 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin().
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
    6758 
    6759 #endif // #if VMA_STATS_STRING_ENABLED
    6760 
    6762 // class VmaBlockMetadata_Generic
    6763 
// Generic (free-list based) block metadata. Both containers route their
// memory through the allocator's allocation callbacks via VmaStlAllocator.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}

VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
    6776 
// Initializes metadata for a freshly created block of the given size:
// the whole block becomes a single free suballocation, registered in the
// size-sorted free list.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // The initial free range must be large enough to be tracked in the
    // size-sorted registry.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem;
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
    6796 
// Full consistency check of the metadata: walks the suballocation list and
// the free-by-size registry and verifies them against each other and the
// cached totals (m_FreeCount, m_SumFreeSize). Returns true when all
// invariants hold; each VMA_VALIDATE checks one invariant.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // Free ranges carry no allocation handle; used ranges always do.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The allocation object must agree with the list entry.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
    6878 
    6879 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    6880 {
    6881  if(!m_FreeSuballocationsBySize.empty())
    6882  {
    6883  return m_FreeSuballocationsBySize.back()->size;
    6884  }
    6885  else
    6886  {
    6887  return 0;
    6888  }
    6889 }
    6890 
// The block is empty when it holds exactly one suballocation and that
// suballocation is free (i.e. covers the whole block).
bool VmaBlockMetadata_Generic::IsEmpty() const
{
    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
}
    6895 
// Fills outInfo with statistics for this single block: counts, byte totals,
// and min/max sizes of used allocations and unused ranges.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    // Start min at the maximum value and max at 0 so the first sample of
    // each kind always wins.
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
    6929 
// Accumulates this block's statistics into inoutStats (pool-level totals).
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
    6940 
    6941 #if VMA_STATS_STRING_ENABLED
    6942 
// Dumps every suballocation of this block - used and free, in address
// order - as JSON.
void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
{
    PrintDetailedMap_Begin(json,
        m_SumFreeSize, // unusedBytes
        m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
        m_FreeCount); // unusedRangeCount

    size_t i = 0;
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem, ++i)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
        }
        else
        {
            PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
        }
    }

    PrintDetailedMap_End(json);
}
    6967 
    6968 #endif // #if VMA_STATS_STRING_ENABLED
    6969 
    6970 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    6971  uint32_t currentFrameIndex,
    6972  uint32_t frameInUseCount,
    6973  VkDeviceSize bufferImageGranularity,
    6974  VkDeviceSize allocSize,
    6975  VkDeviceSize allocAlignment,
    6976  bool upperAddress,
    6977  VmaSuballocationType allocType,
    6978  bool canMakeOtherLost,
    6979  uint32_t strategy,
    6980  VmaAllocationRequest* pAllocationRequest)
    6981 {
    6982  VMA_ASSERT(allocSize > 0);
    6983  VMA_ASSERT(!upperAddress);
    6984  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    6985  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    6986  VMA_HEAVY_ASSERT(Validate());
    6987 
    6988  // There is not enough total free space in this block to fullfill the request: Early return.
    6989  if(canMakeOtherLost == false &&
    6990  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    6991  {
    6992  return false;
    6993  }
    6994 
    6995  // New algorithm, efficiently searching freeSuballocationsBySize.
    6996  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    6997  if(freeSuballocCount > 0)
    6998  {
    7000  {
    7001  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7002  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7003  m_FreeSuballocationsBySize.data(),
    7004  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7005  allocSize + 2 * VMA_DEBUG_MARGIN,
    7006  VmaSuballocationItemSizeLess());
    7007  size_t index = it - m_FreeSuballocationsBySize.data();
    7008  for(; index < freeSuballocCount; ++index)
    7009  {
    7010  if(CheckAllocation(
    7011  currentFrameIndex,
    7012  frameInUseCount,
    7013  bufferImageGranularity,
    7014  allocSize,
    7015  allocAlignment,
    7016  allocType,
    7017  m_FreeSuballocationsBySize[index],
    7018  false, // canMakeOtherLost
    7019  &pAllocationRequest->offset,
    7020  &pAllocationRequest->itemsToMakeLostCount,
    7021  &pAllocationRequest->sumFreeSize,
    7022  &pAllocationRequest->sumItemSize))
    7023  {
    7024  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7025  return true;
    7026  }
    7027  }
    7028  }
    7029  else // WORST_FIT, FIRST_FIT
    7030  {
    7031  // Search staring from biggest suballocations.
    7032  for(size_t index = freeSuballocCount; index--; )
    7033  {
    7034  if(CheckAllocation(
    7035  currentFrameIndex,
    7036  frameInUseCount,
    7037  bufferImageGranularity,
    7038  allocSize,
    7039  allocAlignment,
    7040  allocType,
    7041  m_FreeSuballocationsBySize[index],
    7042  false, // canMakeOtherLost
    7043  &pAllocationRequest->offset,
    7044  &pAllocationRequest->itemsToMakeLostCount,
    7045  &pAllocationRequest->sumFreeSize,
    7046  &pAllocationRequest->sumItemSize))
    7047  {
    7048  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7049  return true;
    7050  }
    7051  }
    7052  }
    7053  }
    7054 
    7055  if(canMakeOtherLost)
    7056  {
    7057  // Brute-force algorithm. TODO: Come up with something better.
    7058 
    7059  pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
    7060  pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;
    7061 
    7062  VmaAllocationRequest tmpAllocRequest = {};
    7063  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    7064  suballocIt != m_Suballocations.end();
    7065  ++suballocIt)
    7066  {
    7067  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    7068  suballocIt->hAllocation->CanBecomeLost())
    7069  {
    7070  if(CheckAllocation(
    7071  currentFrameIndex,
    7072  frameInUseCount,
    7073  bufferImageGranularity,
    7074  allocSize,
    7075  allocAlignment,
    7076  allocType,
    7077  suballocIt,
    7078  canMakeOtherLost,
    7079  &tmpAllocRequest.offset,
    7080  &tmpAllocRequest.itemsToMakeLostCount,
    7081  &tmpAllocRequest.sumFreeSize,
    7082  &tmpAllocRequest.sumItemSize))
    7083  {
    7084  tmpAllocRequest.item = suballocIt;
    7085 
    7086  if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
    7088  {
    7089  *pAllocationRequest = tmpAllocRequest;
    7090  }
    7091  }
    7092  }
    7093  }
    7094 
    7095  if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
    7096  {
    7097  return true;
    7098  }
    7099  }
    7100 
    7101  return false;
    7102 }
    7103 
// Makes lost the allocations that pAllocationRequest marked as blocking
// (itemsToMakeLostCount of them), freeing their ranges as it goes so the
// request's item ends up pointing at a free suballocation. Returns false
// if any of them can no longer be made lost (the request is then unusable).
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free range to reach the next allocation to make lost.
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation frees the item and returns an iterator to
            // the resulting free range.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
    7135 
// Makes lost every allocation in this block that is allowed to become lost
// and is old enough (per MakeLost's frame-age check). Returns the number
// of allocations that were made lost.
uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t lostAllocationCount = 0;
    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
        it != m_Suballocations.end();
        ++it)
    {
        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
            it->hAllocation->CanBecomeLost() &&
            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // Replace the iterator with the one FreeSuballocation returns,
            // since freeing may restructure the list.
            it = FreeSuballocation(it);
            ++lostAllocationCount;
        }
    }
    return lostAllocationCount;
}
    7153 
    7154 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    7155 {
    7156  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7157  it != m_Suballocations.end();
    7158  ++it)
    7159  {
    7160  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    7161  {
    7162  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    7163  {
    7164  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    7165  return VK_ERROR_VALIDATION_FAILED_EXT;
    7166  }
    7167  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    7168  {
    7169  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    7170  return VK_ERROR_VALIDATION_FAILED_EXT;
    7171  }
    7172  }
    7173  }
    7174 
    7175  return VK_SUCCESS;
    7176 }
    7177 
    7178 void VmaBlockMetadata_Generic::Alloc(
    7179  const VmaAllocationRequest& request,
    7180  VmaSuballocationType type,
    7181  VkDeviceSize allocSize,
    7182  bool upperAddress,
    7183  VmaAllocation hAllocation)
    7184 {
    7185  VMA_ASSERT(!upperAddress);
    7186  VMA_ASSERT(request.item != m_Suballocations.end());
    7187  VmaSuballocation& suballoc = *request.item;
    7188  // Given suballocation is a free block.
    7189  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7190  // Given offset is inside this suballocation.
    7191  VMA_ASSERT(request.offset >= suballoc.offset);
    7192  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    7193  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    7194  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    7195 
    7196  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    7197  // it to become used.
    7198  UnregisterFreeSuballocation(request.item);
    7199 
    7200  suballoc.offset = request.offset;
    7201  suballoc.size = allocSize;
    7202  suballoc.type = type;
    7203  suballoc.hAllocation = hAllocation;
    7204 
    7205  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    7206  if(paddingEnd)
    7207  {
    7208  VmaSuballocation paddingSuballoc = {};
    7209  paddingSuballoc.offset = request.offset + allocSize;
    7210  paddingSuballoc.size = paddingEnd;
    7211  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7212  VmaSuballocationList::iterator next = request.item;
    7213  ++next;
    7214  const VmaSuballocationList::iterator paddingEndItem =
    7215  m_Suballocations.insert(next, paddingSuballoc);
    7216  RegisterFreeSuballocation(paddingEndItem);
    7217  }
    7218 
    7219  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    7220  if(paddingBegin)
    7221  {
    7222  VmaSuballocation paddingSuballoc = {};
    7223  paddingSuballoc.offset = request.offset - paddingBegin;
    7224  paddingSuballoc.size = paddingBegin;
    7225  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7226  const VmaSuballocationList::iterator paddingBeginItem =
    7227  m_Suballocations.insert(request.item, paddingSuballoc);
    7228  RegisterFreeSuballocation(paddingBeginItem);
    7229  }
    7230 
    7231  // Update totals.
    7232  m_FreeCount = m_FreeCount - 1;
    7233  if(paddingBegin > 0)
    7234  {
    7235  ++m_FreeCount;
    7236  }
    7237  if(paddingEnd > 0)
    7238  {
    7239  ++m_FreeCount;
    7240  }
    7241  m_SumFreeSize -= allocSize;
    7242 }
    7243 
    7244 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    7245 {
    7246  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7247  suballocItem != m_Suballocations.end();
    7248  ++suballocItem)
    7249  {
    7250  VmaSuballocation& suballoc = *suballocItem;
    7251  if(suballoc.hAllocation == allocation)
    7252  {
    7253  FreeSuballocation(suballocItem);
    7254  VMA_HEAVY_ASSERT(Validate());
    7255  return;
    7256  }
    7257  }
    7258  VMA_ASSERT(0 && "Not found!");
    7259 }
    7260 
    7261 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    7262 {
    7263  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    7264  suballocItem != m_Suballocations.end();
    7265  ++suballocItem)
    7266  {
    7267  VmaSuballocation& suballoc = *suballocItem;
    7268  if(suballoc.offset == offset)
    7269  {
    7270  FreeSuballocation(suballocItem);
    7271  return;
    7272  }
    7273  }
    7274  VMA_ASSERT(0 && "Not found!");
    7275 }
    7276 
// Tries to resize an existing allocation in place.
// Shrinking always succeeds: the freed tail is donated to a following free item
// (growing it backward) or becomes a new free item. Growing succeeds only when
// the directly following item is free and large enough (including the debug
// margin). Returns false when growing is impossible; asserts if the allocation
// is not found in this block.
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        // Unregister first: its size changes, which changes its
                        // position in the size-sorted vector.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
    7403 
    7404 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    7405 {
    7406  VkDeviceSize lastSize = 0;
    7407  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    7408  {
    7409  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    7410 
    7411  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    7412  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7413  VMA_VALIDATE(it->size >= lastSize);
    7414  lastSize = it->size;
    7415  }
    7416  return true;
    7417 }
    7418 
/*
Checks whether an allocation of given size/alignment/type can start at
suballocItem, and computes its final offset into *pOffset.

Two modes:
- canMakeOtherLost == false: suballocItem must be a single free suballocation
  large enough for the whole request (including debug margins and any extra
  alignment forced by bufferImageGranularity).
- canMakeOtherLost == true: the candidate region may span several consecutive
  suballocations; used ones that can become lost are counted in
  *itemsToMakeLostCount (sizes in *pSumItemSize), free ones in *pSumFreeSize.

Returns true if the allocation can be made here; all output parameters are
filled on both success and failure paths up to the point of early return.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // Account the starting suballocation: free space, or a lost-able item.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // Simple mode: the request must fit entirely within this free suballocation.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
    7692 
    7693 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    7694 {
    7695  VMA_ASSERT(item != m_Suballocations.end());
    7696  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7697 
    7698  VmaSuballocationList::iterator nextItem = item;
    7699  ++nextItem;
    7700  VMA_ASSERT(nextItem != m_Suballocations.end());
    7701  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7702 
    7703  item->size += nextItem->size;
    7704  --m_FreeCount;
    7705  m_Suballocations.erase(nextItem);
    7706 }
    7707 
// Converts the given used suballocation back into a free one, coalesces it with
// a free neighbor on either side, and registers the result in
// m_FreeSuballocationsBySize. Returns an iterator to the resulting (possibly
// merged) free suballocation.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    if(mergeWithNext)
    {
        // Unregister the neighbor before MergeFreeWithNext erases it.
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        // prevItem absorbs suballocItem; its size changes, so it must be
        // unregistered and re-registered to keep the by-size vector sorted.
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
    7759 
    7760 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    7761 {
    7762  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    7763  VMA_ASSERT(item->size > 0);
    7764 
    7765  // You may want to enable this validation at the beginning or at the end of
    7766  // this function, depending on what do you want to check.
    7767  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7768 
    7769  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7770  {
    7771  if(m_FreeSuballocationsBySize.empty())
    7772  {
    7773  m_FreeSuballocationsBySize.push_back(item);
    7774  }
    7775  else
    7776  {
    7777  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    7778  }
    7779  }
    7780 
    7781  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    7782 }
    7783 
    7784 
// Removes a free suballocation from m_FreeSuballocationsBySize, if it was large
// enough to have been registered there. Asserts if a registered-size item is
// not actually present in the vector.
void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
{
    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(item->size > 0);

    // You may want to enable this validation at the beginning or at the end of
    // this function, depending on what do you want to check.
    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());

    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    {
        // Binary-search to the first entry whose size is not less than
        // item->size, then scan the run of equal-sized entries for the exact
        // iterator.
        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
            m_FreeSuballocationsBySize.data(),
            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
            item,
            VmaSuballocationItemSizeLess());
        for(size_t index = it - m_FreeSuballocationsBySize.data();
            index < m_FreeSuballocationsBySize.size();
            ++index)
        {
            if(m_FreeSuballocationsBySize[index] == item)
            {
                VmaVectorRemove(m_FreeSuballocationsBySize, index);
                return;
            }
            // Once we leave the equal-size run, the item cannot be further on.
            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
        }
        VMA_ASSERT(0 && "Not found.");
    }

    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
}
    7817 
    7819 // class VmaBlockMetadata_Linear
    7820 
// Constructs an empty linear metadata object: both suballocation vectors use
// the allocator's callbacks, the 2nd vector starts unused, and all null-item
// counters are zero. The managed size is set later in Init().
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
    7833 
// Nothing to release explicitly - the member vectors clean up themselves.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
    7837 
// Sets the size of the managed block; the entire block starts out free.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
    7843 
// Full consistency check of the linear metadata: vector/mode agreement,
// null-item counters, monotonically increasing offsets (respecting the debug
// margin), agreement between each suballocation and its VmaAllocation handle,
// and the free-size total. Returns false (via VMA_VALIDATE) on first violation.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector occupies the lowest offsets, so it is
    // checked first, continuing the running 'offset' into the 1st vector below.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // The leading m_1stNullItemsBeginCount items of the 1st vector must all be null.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the end of the
    // block, so it is iterated in reverse to keep offsets increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
    7970 
    7971 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    7972 {
    7973  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    7974  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    7975 }
    7976 
    7977 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    7978 {
    7979  const VkDeviceSize size = GetSize();
    7980 
    7981  /*
    7982  We don't consider gaps inside allocation vectors with freed allocations because
    7983  they are not suitable for reuse in linear allocator. We consider only space that
    7984  is available for new allocations.
    7985  */
    7986  if(IsEmpty())
    7987  {
    7988  return size;
    7989  }
    7990 
    7991  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    7992 
    7993  switch(m_2ndVectorMode)
    7994  {
    7995  case SECOND_VECTOR_EMPTY:
    7996  /*
    7997  Available space is after end of 1st, as well as before beginning of 1st (which
    7998  whould make it a ring buffer).
    7999  */
    8000  {
    8001  const size_t suballocations1stCount = suballocations1st.size();
    8002  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8003  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8004  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8005  return VMA_MAX(
    8006  firstSuballoc.offset,
    8007  size - (lastSuballoc.offset + lastSuballoc.size));
    8008  }
    8009  break;
    8010 
    8011  case SECOND_VECTOR_RING_BUFFER:
    8012  /*
    8013  Available space is only between end of 2nd and beginning of 1st.
    8014  */
    8015  {
    8016  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8017  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8018  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8019  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8020  }
    8021  break;
    8022 
    8023  case SECOND_VECTOR_DOUBLE_STACK:
    8024  /*
    8025  Available space is only between end of 1st and top of 2nd.
    8026  */
    8027  {
    8028  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8029  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    8030  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    8031  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    8032  }
    8033  break;
    8034 
    8035  default:
    8036  VMA_ASSERT(0);
    8037  return 0;
    8038  }
    8039 }
    8040 
    8041 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    8042 {
    8043  const VkDeviceSize size = GetSize();
    8044  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8045  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8046  const size_t suballoc1stCount = suballocations1st.size();
    8047  const size_t suballoc2ndCount = suballocations2nd.size();
    8048 
    8049  outInfo.blockCount = 1;
    8050  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    8051  outInfo.unusedRangeCount = 0;
    8052  outInfo.usedBytes = 0;
    8053  outInfo.allocationSizeMin = UINT64_MAX;
    8054  outInfo.allocationSizeMax = 0;
    8055  outInfo.unusedRangeSizeMin = UINT64_MAX;
    8056  outInfo.unusedRangeSizeMax = 0;
    8057 
    8058  VkDeviceSize lastOffset = 0;
    8059 
    8060  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8061  {
    8062  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8063  size_t nextAlloc2ndIndex = 0;
    8064  while(lastOffset < freeSpace2ndTo1stEnd)
    8065  {
    8066  // Find next non-null allocation or move nextAllocIndex to the end.
    8067  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8068  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8069  {
    8070  ++nextAlloc2ndIndex;
    8071  }
    8072 
    8073  // Found non-null allocation.
    8074  if(nextAlloc2ndIndex < suballoc2ndCount)
    8075  {
    8076  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8077 
    8078  // 1. Process free space before this allocation.
    8079  if(lastOffset < suballoc.offset)
    8080  {
    8081  // There is free space from lastOffset to suballoc.offset.
    8082  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8083  ++outInfo.unusedRangeCount;
    8084  outInfo.unusedBytes += unusedRangeSize;
    8085  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8086  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8087  }
    8088 
    8089  // 2. Process this allocation.
    8090  // There is allocation with suballoc.offset, suballoc.size.
    8091  outInfo.usedBytes += suballoc.size;
    8092  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8093  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8094 
    8095  // 3. Prepare for next iteration.
    8096  lastOffset = suballoc.offset + suballoc.size;
    8097  ++nextAlloc2ndIndex;
    8098  }
    8099  // We are at the end.
    8100  else
    8101  {
    8102  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8103  if(lastOffset < freeSpace2ndTo1stEnd)
    8104  {
    8105  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8106  ++outInfo.unusedRangeCount;
    8107  outInfo.unusedBytes += unusedRangeSize;
    8108  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8109  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8110  }
    8111 
    8112  // End of loop.
    8113  lastOffset = freeSpace2ndTo1stEnd;
    8114  }
    8115  }
    8116  }
    8117 
    8118  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8119  const VkDeviceSize freeSpace1stTo2ndEnd =
    8120  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8121  while(lastOffset < freeSpace1stTo2ndEnd)
    8122  {
    8123  // Find next non-null allocation or move nextAllocIndex to the end.
    8124  while(nextAlloc1stIndex < suballoc1stCount &&
    8125  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8126  {
    8127  ++nextAlloc1stIndex;
    8128  }
    8129 
    8130  // Found non-null allocation.
    8131  if(nextAlloc1stIndex < suballoc1stCount)
    8132  {
    8133  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8134 
    8135  // 1. Process free space before this allocation.
    8136  if(lastOffset < suballoc.offset)
    8137  {
    8138  // There is free space from lastOffset to suballoc.offset.
    8139  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8140  ++outInfo.unusedRangeCount;
    8141  outInfo.unusedBytes += unusedRangeSize;
    8142  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8143  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8144  }
    8145 
    8146  // 2. Process this allocation.
    8147  // There is allocation with suballoc.offset, suballoc.size.
    8148  outInfo.usedBytes += suballoc.size;
    8149  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8150  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8151 
    8152  // 3. Prepare for next iteration.
    8153  lastOffset = suballoc.offset + suballoc.size;
    8154  ++nextAlloc1stIndex;
    8155  }
    8156  // We are at the end.
    8157  else
    8158  {
    8159  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8160  if(lastOffset < freeSpace1stTo2ndEnd)
    8161  {
    8162  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8163  ++outInfo.unusedRangeCount;
    8164  outInfo.unusedBytes += unusedRangeSize;
    8165  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8166  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8167  }
    8168 
    8169  // End of loop.
    8170  lastOffset = freeSpace1stTo2ndEnd;
    8171  }
    8172  }
    8173 
    8174  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8175  {
    8176  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8177  while(lastOffset < size)
    8178  {
    8179  // Find next non-null allocation or move nextAllocIndex to the end.
    8180  while(nextAlloc2ndIndex != SIZE_MAX &&
    8181  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8182  {
    8183  --nextAlloc2ndIndex;
    8184  }
    8185 
    8186  // Found non-null allocation.
    8187  if(nextAlloc2ndIndex != SIZE_MAX)
    8188  {
    8189  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8190 
    8191  // 1. Process free space before this allocation.
    8192  if(lastOffset < suballoc.offset)
    8193  {
    8194  // There is free space from lastOffset to suballoc.offset.
    8195  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8196  ++outInfo.unusedRangeCount;
    8197  outInfo.unusedBytes += unusedRangeSize;
    8198  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8199  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8200  }
    8201 
    8202  // 2. Process this allocation.
    8203  // There is allocation with suballoc.offset, suballoc.size.
    8204  outInfo.usedBytes += suballoc.size;
    8205  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    8206  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
    8207 
    8208  // 3. Prepare for next iteration.
    8209  lastOffset = suballoc.offset + suballoc.size;
    8210  --nextAlloc2ndIndex;
    8211  }
    8212  // We are at the end.
    8213  else
    8214  {
    8215  // There is free space from lastOffset to size.
    8216  if(lastOffset < size)
    8217  {
    8218  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8219  ++outInfo.unusedRangeCount;
    8220  outInfo.unusedBytes += unusedRangeSize;
    8221  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    8222  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
    8223  }
    8224 
    8225  // End of loop.
    8226  lastOffset = size;
    8227  }
    8228  }
    8229  }
    8230 
    8231  outInfo.unusedBytes = size - outInfo.usedBytes;
    8232 }
    8233 
    8234 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    8235 {
    8236  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8237  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8238  const VkDeviceSize size = GetSize();
    8239  const size_t suballoc1stCount = suballocations1st.size();
    8240  const size_t suballoc2ndCount = suballocations2nd.size();
    8241 
    8242  inoutStats.size += size;
    8243 
    8244  VkDeviceSize lastOffset = 0;
    8245 
    8246  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8247  {
    8248  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    8249  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
    8250  while(lastOffset < freeSpace2ndTo1stEnd)
    8251  {
    8252  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8253  while(nextAlloc2ndIndex < suballoc2ndCount &&
    8254  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8255  {
    8256  ++nextAlloc2ndIndex;
    8257  }
    8258 
    8259  // Found non-null allocation.
    8260  if(nextAlloc2ndIndex < suballoc2ndCount)
    8261  {
    8262  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8263 
    8264  // 1. Process free space before this allocation.
    8265  if(lastOffset < suballoc.offset)
    8266  {
    8267  // There is free space from lastOffset to suballoc.offset.
    8268  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8269  inoutStats.unusedSize += unusedRangeSize;
    8270  ++inoutStats.unusedRangeCount;
    8271  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8272  }
    8273 
    8274  // 2. Process this allocation.
    8275  // There is allocation with suballoc.offset, suballoc.size.
    8276  ++inoutStats.allocationCount;
    8277 
    8278  // 3. Prepare for next iteration.
    8279  lastOffset = suballoc.offset + suballoc.size;
    8280  ++nextAlloc2ndIndex;
    8281  }
    8282  // We are at the end.
    8283  else
    8284  {
    8285  if(lastOffset < freeSpace2ndTo1stEnd)
    8286  {
    8287  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    8288  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    8289  inoutStats.unusedSize += unusedRangeSize;
    8290  ++inoutStats.unusedRangeCount;
    8291  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8292  }
    8293 
    8294  // End of loop.
    8295  lastOffset = freeSpace2ndTo1stEnd;
    8296  }
    8297  }
    8298  }
    8299 
    8300  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    8301  const VkDeviceSize freeSpace1stTo2ndEnd =
    8302  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    8303  while(lastOffset < freeSpace1stTo2ndEnd)
    8304  {
    8305  // Find next non-null allocation or move nextAllocIndex to the end.
    8306  while(nextAlloc1stIndex < suballoc1stCount &&
    8307  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    8308  {
    8309  ++nextAlloc1stIndex;
    8310  }
    8311 
    8312  // Found non-null allocation.
    8313  if(nextAlloc1stIndex < suballoc1stCount)
    8314  {
    8315  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    8316 
    8317  // 1. Process free space before this allocation.
    8318  if(lastOffset < suballoc.offset)
    8319  {
    8320  // There is free space from lastOffset to suballoc.offset.
    8321  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8322  inoutStats.unusedSize += unusedRangeSize;
    8323  ++inoutStats.unusedRangeCount;
    8324  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8325  }
    8326 
    8327  // 2. Process this allocation.
    8328  // There is allocation with suballoc.offset, suballoc.size.
    8329  ++inoutStats.allocationCount;
    8330 
    8331  // 3. Prepare for next iteration.
    8332  lastOffset = suballoc.offset + suballoc.size;
    8333  ++nextAlloc1stIndex;
    8334  }
    8335  // We are at the end.
    8336  else
    8337  {
    8338  if(lastOffset < freeSpace1stTo2ndEnd)
    8339  {
    8340  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    8341  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    8342  inoutStats.unusedSize += unusedRangeSize;
    8343  ++inoutStats.unusedRangeCount;
    8344  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8345  }
    8346 
    8347  // End of loop.
    8348  lastOffset = freeSpace1stTo2ndEnd;
    8349  }
    8350  }
    8351 
    8352  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8353  {
    8354  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    8355  while(lastOffset < size)
    8356  {
    8357  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    8358  while(nextAlloc2ndIndex != SIZE_MAX &&
    8359  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    8360  {
    8361  --nextAlloc2ndIndex;
    8362  }
    8363 
    8364  // Found non-null allocation.
    8365  if(nextAlloc2ndIndex != SIZE_MAX)
    8366  {
    8367  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    8368 
    8369  // 1. Process free space before this allocation.
    8370  if(lastOffset < suballoc.offset)
    8371  {
    8372  // There is free space from lastOffset to suballoc.offset.
    8373  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    8374  inoutStats.unusedSize += unusedRangeSize;
    8375  ++inoutStats.unusedRangeCount;
    8376  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8377  }
    8378 
    8379  // 2. Process this allocation.
    8380  // There is allocation with suballoc.offset, suballoc.size.
    8381  ++inoutStats.allocationCount;
    8382 
    8383  // 3. Prepare for next iteration.
    8384  lastOffset = suballoc.offset + suballoc.size;
    8385  --nextAlloc2ndIndex;
    8386  }
    8387  // We are at the end.
    8388  else
    8389  {
    8390  if(lastOffset < size)
    8391  {
    8392  // There is free space from lastOffset to size.
    8393  const VkDeviceSize unusedRangeSize = size - lastOffset;
    8394  inoutStats.unusedSize += unusedRangeSize;
    8395  ++inoutStats.unusedRangeCount;
    8396  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    8397  }
    8398 
    8399  // End of loop.
    8400  lastOffset = size;
    8401  }
    8402  }
    8403  }
    8404 }
    8405 
    8406 #if VMA_STATS_STRING_ENABLED
/*
Writes a detailed JSON map of this linear block.

Two passes over the same address-ordered walk (2nd vector in ring-buffer mode,
then 1st vector, then top of 2nd vector in double-stack mode):
- FIRST PASS only counts allocations, used bytes, and unused ranges, because
  PrintDetailedMap_Begin needs the totals up front.
- SECOND PASS repeats the identical walk and emits one JSON entry per
  allocation / unused range.
The two passes must visit exactly the same ranges or the emitted header totals
will not match the entries.
*/
void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
{
    const VkDeviceSize size = GetSize();
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    const size_t suballoc1stCount = suballocations1st.size();
    const size_t suballoc2ndCount = suballocations2nd.size();

    // FIRST PASS

    size_t unusedRangeCount = 0;
    VkDeviceSize usedBytes = 0;

    VkDeviceSize lastOffset = 0;

    size_t alloc2ndCount = 0;
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        // 2nd-vector allocations occupy the space before the first live
        // 1st-vector allocation.
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    size_t alloc1stCount = 0;
    // In double-stack mode the 1st vector can only extend up to the top of the
    // upper stack; otherwise it can reach the end of the block.
    const VkDeviceSize freeSpace1stTo2ndEnd =
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                ++unusedRangeCount;
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            ++alloc1stCount;
            usedBytes += suballoc.size;

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            // NOTE(review): comparison against `size` here, while the matching
            // branch in the second pass compares against freeSpace1stTo2ndEnd.
            // Under the loop condition (lastOffset < freeSpace1stTo2ndEnd <= size)
            // both are always true, so behavior is identical — but confirm and
            // consider unifying for readability.
            if(lastOffset < size)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                ++unusedRangeCount;
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Upper stack is stored back-to-front: walk it from the last element
        // (lowest offset) down to index 0 (highest offset).
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    ++unusedRangeCount;
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                ++alloc2ndCount;
                usedBytes += suballoc.size;

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    ++unusedRangeCount;
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    const VkDeviceSize unusedBytes = size - usedBytes;
    // Header must be written before any entries; that is why the counting pass
    // above is needed.
    PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);

    // SECOND PASS
    lastOffset = 0;

    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
        size_t nextAlloc2ndIndex = 0;
        while(lastOffset < freeSpace2ndTo1stEnd)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex < suballoc2ndCount &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                ++nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex < suballoc2ndCount)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                ++nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < freeSpace2ndTo1stEnd)
                {
                    // There is free space from lastOffset to freeSpace2ndTo1stEnd.
                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = freeSpace2ndTo1stEnd;
            }
        }
    }

    // Reuses nextAlloc1stIndex and freeSpace1stTo2ndEnd computed in the first pass.
    nextAlloc1stIndex = m_1stNullItemsBeginCount;
    while(lastOffset < freeSpace1stTo2ndEnd)
    {
        // Find next non-null allocation or move nextAllocIndex to the end.
        while(nextAlloc1stIndex < suballoc1stCount &&
            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
        {
            ++nextAlloc1stIndex;
        }

        // Found non-null allocation.
        if(nextAlloc1stIndex < suballoc1stCount)
        {
            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];

            // 1. Process free space before this allocation.
            if(lastOffset < suballoc.offset)
            {
                // There is free space from lastOffset to suballoc.offset.
                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // 2. Process this allocation.
            // There is allocation with suballoc.offset, suballoc.size.
            PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

            // 3. Prepare for next iteration.
            lastOffset = suballoc.offset + suballoc.size;
            ++nextAlloc1stIndex;
        }
        // We are at the end.
        else
        {
            if(lastOffset < freeSpace1stTo2ndEnd)
            {
                // There is free space from lastOffset to freeSpace1stTo2ndEnd.
                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
                PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
            }

            // End of loop.
            lastOffset = freeSpace1stTo2ndEnd;
        }
    }

    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        // Upper stack walked back-to-front, same as in the first pass.
        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
        while(lastOffset < size)
        {
            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
            while(nextAlloc2ndIndex != SIZE_MAX &&
                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
            {
                --nextAlloc2ndIndex;
            }

            // Found non-null allocation.
            if(nextAlloc2ndIndex != SIZE_MAX)
            {
                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];

                // 1. Process free space before this allocation.
                if(lastOffset < suballoc.offset)
                {
                    // There is free space from lastOffset to suballoc.offset.
                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // 2. Process this allocation.
                // There is allocation with suballoc.offset, suballoc.size.
                PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);

                // 3. Prepare for next iteration.
                lastOffset = suballoc.offset + suballoc.size;
                --nextAlloc2ndIndex;
            }
            // We are at the end.
            else
            {
                if(lastOffset < size)
                {
                    // There is free space from lastOffset to size.
                    const VkDeviceSize unusedRangeSize = size - lastOffset;
                    PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
                }

                // End of loop.
                lastOffset = size;
            }
        }
    }

    PrintDetailedMap_End(json);
}
    8721 #endif // #if VMA_STATS_STRING_ENABLED
    8722 
    8723 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    8724  uint32_t currentFrameIndex,
    8725  uint32_t frameInUseCount,
    8726  VkDeviceSize bufferImageGranularity,
    8727  VkDeviceSize allocSize,
    8728  VkDeviceSize allocAlignment,
    8729  bool upperAddress,
    8730  VmaSuballocationType allocType,
    8731  bool canMakeOtherLost,
    8732  uint32_t strategy,
    8733  VmaAllocationRequest* pAllocationRequest)
    8734 {
    8735  VMA_ASSERT(allocSize > 0);
    8736  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8737  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    8738  VMA_HEAVY_ASSERT(Validate());
    8739 
    8740  const VkDeviceSize size = GetSize();
    8741  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8742  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8743 
    8744  if(upperAddress)
    8745  {
    8746  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8747  {
    8748  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    8749  return false;
    8750  }
    8751 
    8752  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    8753  if(allocSize > size)
    8754  {
    8755  return false;
    8756  }
    8757  VkDeviceSize resultBaseOffset = size - allocSize;
    8758  if(!suballocations2nd.empty())
    8759  {
    8760  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8761  resultBaseOffset = lastSuballoc.offset - allocSize;
    8762  if(allocSize > lastSuballoc.offset)
    8763  {
    8764  return false;
    8765  }
    8766  }
    8767 
    8768  // Start from offset equal to end of free space.
    8769  VkDeviceSize resultOffset = resultBaseOffset;
    8770 
    8771  // Apply VMA_DEBUG_MARGIN at the end.
    8772  if(VMA_DEBUG_MARGIN > 0)
    8773  {
    8774  if(resultOffset < VMA_DEBUG_MARGIN)
    8775  {
    8776  return false;
    8777  }
    8778  resultOffset -= VMA_DEBUG_MARGIN;
    8779  }
    8780 
    8781  // Apply alignment.
    8782  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    8783 
    8784  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    8785  // Make bigger alignment if necessary.
    8786  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8787  {
    8788  bool bufferImageGranularityConflict = false;
    8789  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8790  {
    8791  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8792  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8793  {
    8794  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    8795  {
    8796  bufferImageGranularityConflict = true;
    8797  break;
    8798  }
    8799  }
    8800  else
    8801  // Already on previous page.
    8802  break;
    8803  }
    8804  if(bufferImageGranularityConflict)
    8805  {
    8806  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    8807  }
    8808  }
    8809 
    8810  // There is enough free space.
    8811  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    8812  suballocations1st.back().offset + suballocations1st.back().size :
    8813  0;
    8814  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    8815  {
    8816  // Check previous suballocations for BufferImageGranularity conflicts.
    8817  // If conflict exists, allocation cannot be made here.
    8818  if(bufferImageGranularity > 1)
    8819  {
    8820  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8821  {
    8822  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8823  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8824  {
    8825  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    8826  {
    8827  return false;
    8828  }
    8829  }
    8830  else
    8831  {
    8832  // Already on next page.
    8833  break;
    8834  }
    8835  }
    8836  }
    8837 
    8838  // All tests passed: Success.
    8839  pAllocationRequest->offset = resultOffset;
    8840  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    8841  pAllocationRequest->sumItemSize = 0;
    8842  // pAllocationRequest->item unused.
    8843  pAllocationRequest->itemsToMakeLostCount = 0;
    8844  return true;
    8845  }
    8846  }
    8847  else // !upperAddress
    8848  {
    8849  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8850  {
    8851  // Try to allocate at the end of 1st vector.
    8852 
    8853  VkDeviceSize resultBaseOffset = 0;
    8854  if(!suballocations1st.empty())
    8855  {
    8856  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    8857  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8858  }
    8859 
    8860  // Start from offset equal to beginning of free space.
    8861  VkDeviceSize resultOffset = resultBaseOffset;
    8862 
    8863  // Apply VMA_DEBUG_MARGIN at the beginning.
    8864  if(VMA_DEBUG_MARGIN > 0)
    8865  {
    8866  resultOffset += VMA_DEBUG_MARGIN;
    8867  }
    8868 
    8869  // Apply alignment.
    8870  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8871 
    8872  // Check previous suballocations for BufferImageGranularity conflicts.
    8873  // Make bigger alignment if necessary.
    8874  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    8875  {
    8876  bool bufferImageGranularityConflict = false;
    8877  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    8878  {
    8879  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    8880  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8881  {
    8882  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8883  {
    8884  bufferImageGranularityConflict = true;
    8885  break;
    8886  }
    8887  }
    8888  else
    8889  // Already on previous page.
    8890  break;
    8891  }
    8892  if(bufferImageGranularityConflict)
    8893  {
    8894  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8895  }
    8896  }
    8897 
    8898  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    8899  suballocations2nd.back().offset : size;
    8900 
    8901  // There is enough free space at the end after alignment.
    8902  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    8903  {
    8904  // Check next suballocations for BufferImageGranularity conflicts.
    8905  // If conflict exists, allocation cannot be made here.
    8906  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8907  {
    8908  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    8909  {
    8910  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    8911  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8912  {
    8913  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8914  {
    8915  return false;
    8916  }
    8917  }
    8918  else
    8919  {
    8920  // Already on previous page.
    8921  break;
    8922  }
    8923  }
    8924  }
    8925 
    8926  // All tests passed: Success.
    8927  pAllocationRequest->offset = resultOffset;
    8928  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    8929  pAllocationRequest->sumItemSize = 0;
    8930  // pAllocationRequest->item unused.
    8931  pAllocationRequest->itemsToMakeLostCount = 0;
    8932  return true;
    8933  }
    8934  }
    8935 
    8936  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    8937  // beginning of 1st vector as the end of free space.
    8938  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8939  {
    8940  VMA_ASSERT(!suballocations1st.empty());
    8941 
    8942  VkDeviceSize resultBaseOffset = 0;
    8943  if(!suballocations2nd.empty())
    8944  {
    8945  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    8946  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    8947  }
    8948 
    8949  // Start from offset equal to beginning of free space.
    8950  VkDeviceSize resultOffset = resultBaseOffset;
    8951 
    8952  // Apply VMA_DEBUG_MARGIN at the beginning.
    8953  if(VMA_DEBUG_MARGIN > 0)
    8954  {
    8955  resultOffset += VMA_DEBUG_MARGIN;
    8956  }
    8957 
    8958  // Apply alignment.
    8959  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    8960 
    8961  // Check previous suballocations for BufferImageGranularity conflicts.
    8962  // Make bigger alignment if necessary.
    8963  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    8964  {
    8965  bool bufferImageGranularityConflict = false;
    8966  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    8967  {
    8968  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    8969  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    8970  {
    8971  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8972  {
    8973  bufferImageGranularityConflict = true;
    8974  break;
    8975  }
    8976  }
    8977  else
    8978  // Already on previous page.
    8979  break;
    8980  }
    8981  if(bufferImageGranularityConflict)
    8982  {
    8983  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    8984  }
    8985  }
    8986 
    8987  pAllocationRequest->itemsToMakeLostCount = 0;
    8988  pAllocationRequest->sumItemSize = 0;
    8989  size_t index1st = m_1stNullItemsBeginCount;
    8990 
    8991  if(canMakeOtherLost)
    8992  {
    8993  while(index1st < suballocations1st.size() &&
    8994  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    8995  {
    8996  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    8997  const VmaSuballocation& suballoc = suballocations1st[index1st];
    8998  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    8999  {
    9000  // No problem.
    9001  }
    9002  else
    9003  {
    9004  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    9005  if(suballoc.hAllocation->CanBecomeLost() &&
    9006  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9007  {
    9008  ++pAllocationRequest->itemsToMakeLostCount;
    9009  pAllocationRequest->sumItemSize += suballoc.size;
    9010  }
    9011  else
    9012  {
    9013  return false;
    9014  }
    9015  }
    9016  ++index1st;
    9017  }
    9018 
    9019  // Check next suballocations for BufferImageGranularity conflicts.
    9020  // If conflict exists, we must mark more allocations lost or fail.
    9021  if(bufferImageGranularity > 1)
    9022  {
    9023  while(index1st < suballocations1st.size())
    9024  {
    9025  const VmaSuballocation& suballoc = suballocations1st[index1st];
    9026  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    9027  {
    9028  if(suballoc.hAllocation != VK_NULL_HANDLE)
    9029  {
    9030  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    9031  if(suballoc.hAllocation->CanBecomeLost() &&
    9032  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    9033  {
    9034  ++pAllocationRequest->itemsToMakeLostCount;
    9035  pAllocationRequest->sumItemSize += suballoc.size;
    9036  }
    9037  else
    9038  {
    9039  return false;
    9040  }
    9041  }
    9042  }
    9043  else
    9044  {
    9045  // Already on next page.
    9046  break;
    9047  }
    9048  ++index1st;
    9049  }
    9050  }
    9051  }
    9052 
    9053  // There is enough free space at the end after alignment.
    9054  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
    9055  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    9056  {
    9057  // Check next suballocations for BufferImageGranularity conflicts.
    9058  // If conflict exists, allocation cannot be made here.
    9059  if(bufferImageGranularity > 1)
    9060  {
    9061  for(size_t nextSuballocIndex = index1st;
    9062  nextSuballocIndex < suballocations1st.size();
    9063  nextSuballocIndex++)
    9064  {
    9065  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    9066  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9067  {
    9068  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9069  {
    9070  return false;
    9071  }
    9072  }
    9073  else
    9074  {
    9075  // Already on next page.
    9076  break;
    9077  }
    9078  }
    9079  }
    9080 
    9081  // All tests passed: Success.
    9082  pAllocationRequest->offset = resultOffset;
    9083  pAllocationRequest->sumFreeSize =
    9084  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    9085  - resultBaseOffset
    9086  - pAllocationRequest->sumItemSize;
    9087  // pAllocationRequest->item unused.
    9088  return true;
    9089  }
    9090  }
    9091  }
    9092 
    9093  return false;
    9094 }
    9095 
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Makes lost the allocations that CreateAllocationRequest() counted in
    // pAllocationRequest->itemsToMakeLostCount, scanning the 1st vector from
    // its first non-null item. Returns false if any of them cannot be made
    // lost, true on success (or when there is nothing to make lost).
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    // Requests with items to make lost are only produced for the ring-buffer
    // layout (or while the 2nd vector is still empty), never for double stack.
    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        // Items already free are skipped; they don't count towards itemsToMakeLostCount.
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn this suballocation into a null middle item and account
                // for the regained free space.
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().

    return true;
}
    9140 
    9141 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9142 {
    9143  uint32_t lostAllocationCount = 0;
    9144 
    9145  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9146  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9147  {
    9148  VmaSuballocation& suballoc = suballocations1st[i];
    9149  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9150  suballoc.hAllocation->CanBecomeLost() &&
    9151  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9152  {
    9153  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9154  suballoc.hAllocation = VK_NULL_HANDLE;
    9155  ++m_1stNullItemsMiddleCount;
    9156  m_SumFreeSize += suballoc.size;
    9157  ++lostAllocationCount;
    9158  }
    9159  }
    9160 
    9161  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9162  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9163  {
    9164  VmaSuballocation& suballoc = suballocations2nd[i];
    9165  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    9166  suballoc.hAllocation->CanBecomeLost() &&
    9167  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    9168  {
    9169  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    9170  suballoc.hAllocation = VK_NULL_HANDLE;
    9171  ++m_2ndNullItemsCount;
    9172  ++lostAllocationCount;
    9173  }
    9174  }
    9175 
    9176  if(lostAllocationCount)
    9177  {
    9178  CleanupAfterFree();
    9179  }
    9180 
    9181  return lostAllocationCount;
    9182 }
    9183 
    9184 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    9185 {
    9186  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9187  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    9188  {
    9189  const VmaSuballocation& suballoc = suballocations1st[i];
    9190  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9191  {
    9192  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9193  {
    9194  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9195  return VK_ERROR_VALIDATION_FAILED_EXT;
    9196  }
    9197  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9198  {
    9199  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9200  return VK_ERROR_VALIDATION_FAILED_EXT;
    9201  }
    9202  }
    9203  }
    9204 
    9205  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9206  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    9207  {
    9208  const VmaSuballocation& suballoc = suballocations2nd[i];
    9209  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    9210  {
    9211  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    9212  {
    9213  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    9214  return VK_ERROR_VALIDATION_FAILED_EXT;
    9215  }
    9216  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    9217  {
    9218  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    9219  return VK_ERROR_VALIDATION_FAILED_EXT;
    9220  }
    9221  }
    9222  }
    9223 
    9224  return VK_SUCCESS;
    9225 }
    9226 
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Commits an allocation previously prepared by CreateAllocationRequest():
    // inserts a suballocation at request.offset into the proper vector and
    // updates m_SumFreeSize. Which vector receives it depends on upperAddress
    // (double stack) and on where the offset falls (end of 1st vector vs.
    // wrap-around into the 2nd vector in ring-buffer mode).
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper-address allocations go to the 2nd vector and switch (or keep)
        // the block in double-stack mode. Mixing with ring buffer is illegal.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                // Entering (or continuing) ring-buffer mode; double stack is illegal here.
                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // CreateAllocationRequest() should never produce an offset
                // that lands anywhere else.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
    9296 
    9297 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    9298 {
    9299  FreeAtOffset(allocation->GetOffset());
    9300 }
    9301 
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    // Frees the suballocation that starts at the given offset. Cheap O(1)
    // special cases are tried first (first live item of the 1st vector, last
    // item of the active vector), then binary search in the middle of each
    // vector. Freed middle items become null items; CleanupAfterFree()
    // restores the invariants afterwards.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // 1st vector is sorted by increasing offset, so binary search applies.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // In ring-buffer mode the 2nd vector is ordered by increasing offset,
        // in double-stack mode by decreasing offset - hence the two comparators.
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
    9390 
    9391 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    9392 {
    9393  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    9394  const size_t suballocCount = AccessSuballocations1st().size();
    9395  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    9396 }
    9397 
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    // Restores the class invariants after suballocations were turned into
    // null items: trims null items from the edges of both vectors, optionally
    // compacts the 1st vector, and swaps the two vectors when the 1st one
    // drains in ring-buffer mode.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // Everything was freed - reset to the pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        // (Each one moves from the "middle" count to the "begin" count.)
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compaction: shift all live items of the 1st vector to the
            // front (dropping every null item), then shrink the vector.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                // The former 2nd vector's null items become the new 1st
                // vector's begin/middle null items; leading nulls are
                // reclassified by the loop below.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flipping this index is what actually swaps which physical
                // vector Access*1st()/Access*2nd() return.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
    9494 
    9495 
    9497 // class VmaBlockMetadata_Buddy
    9498 
// Constructs an empty buddy-allocator metadata object. The node tree and the
// free lists are actually populated later, in Init().
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1), // Starts at 1 - presumably accounts for the root free node created in Init(); verify against Validate().
    m_SumFreeSize(0)
{
    // Free lists start empty; Init() inserts the root node into level 0.
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
    9508 
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    // Destroys the tree of nodes starting from the root.
    DeleteNode(m_Root);
}
    9513 
    9514 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    9515 {
    9516  VmaBlockMetadata::Init(size);
    9517 
    9518  m_UsableSize = VmaPrevPow2(size);
    9519  m_SumFreeSize = m_UsableSize;
    9520 
    9521  // Calculate m_LevelCount.
    9522  m_LevelCount = 1;
    9523  while(m_LevelCount < MAX_LEVELS &&
    9524  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    9525  {
    9526  ++m_LevelCount;
    9527  }
    9528 
    9529  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    9530  rootNode->offset = 0;
    9531  rootNode->type = Node::TYPE_FREE;
    9532  rootNode->parent = VMA_NULL;
    9533  rootNode->buddy = VMA_NULL;
    9534 
    9535  m_Root = rootNode;
    9536  AddToFreeListFront(0, rootNode);
    9537 }
    9538 
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Consistency check of the node tree, the aggregate counters, and the
    // per-level free lists. NOTE(review): control flow relies on the
    // VMA_VALIDATE macro (defined elsewhere) reacting to a failed condition;
    // reaching the end means all checks passed.
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        // Head of each list must have no predecessor.
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        // Walk the doubly-linked list: every node is free, back-pointers are
        // consistent, and the last node matches the stored list tail.
        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
    9581 
    9582 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    9583 {
    9584  for(uint32_t level = 0; level < m_LevelCount; ++level)
    9585  {
    9586  if(m_FreeList[level].front != VMA_NULL)
    9587  {
    9588  return LevelToNodeSize(level);
    9589  }
    9590  }
    9591  return 0;
    9592 }
    9593 
    9594 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9595 {
    9596  const VkDeviceSize unusableSize = GetUnusableSize();
    9597 
    9598  outInfo.blockCount = 1;
    9599 
    9600  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    9601  outInfo.usedBytes = outInfo.unusedBytes = 0;
    9602 
    9603  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    9604  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    9605  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    9606 
    9607  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    9608 
    9609  if(unusableSize > 0)
    9610  {
    9611  ++outInfo.unusedRangeCount;
    9612  outInfo.unusedBytes += unusableSize;
    9613  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    9614  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    9615  }
    9616 }
    9617 
    9618 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    9619 {
    9620  const VkDeviceSize unusableSize = GetUnusableSize();
    9621 
    9622  inoutStats.size += GetSize();
    9623  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    9624  inoutStats.allocationCount += m_AllocationCount;
    9625  inoutStats.unusedRangeCount += m_FreeCount;
    9626  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    9627 
    9628  if(unusableSize > 0)
    9629  {
    9630  ++inoutStats.unusedRangeCount;
    9631  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    9632  }
    9633 }
    9634 
    9635 #if VMA_STATS_STRING_ENABLED
    9636 
    9637 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    9638 {
    9639  // TODO optimize
    9640  VmaStatInfo stat;
    9641  CalcAllocationStatInfo(stat);
    9642 
    9643  PrintDetailedMap_Begin(
    9644  json,
    9645  stat.unusedBytes,
    9646  stat.allocationCount,
    9647  stat.unusedRangeCount);
    9648 
    9649  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    9650 
    9651  const VkDeviceSize unusableSize = GetUnusableSize();
    9652  if(unusableSize > 0)
    9653  {
    9654  PrintDetailedMap_UnusedRange(json,
    9655  m_UsableSize, // offset
    9656  unusableSize); // size
    9657  }
    9658 
    9659  PrintDetailedMap_End(json);
    9660 }
    9661 
    9662 #endif // #if VMA_STATS_STRING_ENABLED
    9663 
    9664 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    9665  uint32_t currentFrameIndex,
    9666  uint32_t frameInUseCount,
    9667  VkDeviceSize bufferImageGranularity,
    9668  VkDeviceSize allocSize,
    9669  VkDeviceSize allocAlignment,
    9670  bool upperAddress,
    9671  VmaSuballocationType allocType,
    9672  bool canMakeOtherLost,
    9673  uint32_t strategy,
    9674  VmaAllocationRequest* pAllocationRequest)
    9675 {
    9676  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    9677 
    9678  // Simple way to respect bufferImageGranularity. May be optimized some day.
    9679  // Whenever it might be an OPTIMAL image...
    9680  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    9681  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    9682  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    9683  {
    9684  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    9685  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    9686  }
    9687 
    9688  if(allocSize > m_UsableSize)
    9689  {
    9690  return false;
    9691  }
    9692 
    9693  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    9694  for(uint32_t level = targetLevel + 1; level--; )
    9695  {
    9696  for(Node* freeNode = m_FreeList[level].front;
    9697  freeNode != VMA_NULL;
    9698  freeNode = freeNode->free.next)
    9699  {
    9700  if(freeNode->offset % allocAlignment == 0)
    9701  {
    9702  pAllocationRequest->offset = freeNode->offset;
    9703  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    9704  pAllocationRequest->sumItemSize = 0;
    9705  pAllocationRequest->itemsToMakeLostCount = 0;
    9706  pAllocationRequest->customData = (void*)(uintptr_t)level;
    9707  return true;
    9708  }
    9709  }
    9710  }
    9711 
    9712  return false;
    9713 }
    9714 
    9715 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    9716  uint32_t currentFrameIndex,
    9717  uint32_t frameInUseCount,
    9718  VmaAllocationRequest* pAllocationRequest)
    9719 {
    9720  /*
    9721  Lost allocations are not supported in buddy allocator at the moment.
    9722  Support might be added in the future.
    9723  */
    9724  return pAllocationRequest->itemsToMakeLostCount == 0;
    9725 }
    9726 
    9727 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    9728 {
    9729  /*
    9730  Lost allocations are not supported in buddy allocator at the moment.
    9731  Support might be added in the future.
    9732  */
    9733  return 0;
    9734 }
    9735 
/*
Commits a previously found allocation request in the buddy tree: locates the
free node at the level stashed in request.customData, splits free nodes
downward until the target level for allocSize is reached, then converts the
final node into an allocation node and updates the counters.
*/
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    // The level at which CreateAllocationRequest found the free node was
    // passed through request.customData.
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Walk the free list at currLevel to the node whose offset matches the request.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // rightChild is pushed first so that leftChild ends up at the front,
        // which the loop below relies on when it re-reads the list front.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        // One free parent was consumed and two free children created: net +1.
        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
    9810 
    9811 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    9812 {
    9813  if(node->type == Node::TYPE_SPLIT)
    9814  {
    9815  DeleteNode(node->split.leftChild->buddy);
    9816  DeleteNode(node->split.leftChild);
    9817  }
    9818 
    9819  vma_delete(GetAllocationCallbacks(), node);
    9820 }
    9821 
    9822 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    9823 {
    9824  VMA_VALIDATE(level < m_LevelCount);
    9825  VMA_VALIDATE(curr->parent == parent);
    9826  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    9827  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    9828  switch(curr->type)
    9829  {
    9830  case Node::TYPE_FREE:
    9831  // curr->free.prev, next are validated separately.
    9832  ctx.calculatedSumFreeSize += levelNodeSize;
    9833  ++ctx.calculatedFreeCount;
    9834  break;
    9835  case Node::TYPE_ALLOCATION:
    9836  ++ctx.calculatedAllocationCount;
    9837  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    9838  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    9839  break;
    9840  case Node::TYPE_SPLIT:
    9841  {
    9842  const uint32_t childrenLevel = level + 1;
    9843  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    9844  const Node* const leftChild = curr->split.leftChild;
    9845  VMA_VALIDATE(leftChild != VMA_NULL);
    9846  VMA_VALIDATE(leftChild->offset == curr->offset);
    9847  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    9848  {
    9849  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    9850  }
    9851  const Node* const rightChild = leftChild->buddy;
    9852  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    9853  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    9854  {
    9855  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    9856  }
    9857  }
    9858  break;
    9859  default:
    9860  return false;
    9861  }
    9862 
    9863  return true;
    9864 }
    9865 
    9866 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    9867 {
    9868  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    9869  uint32_t level = 0;
    9870  VkDeviceSize currLevelNodeSize = m_UsableSize;
    9871  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    9872  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    9873  {
    9874  ++level;
    9875  currLevelNodeSize = nextLevelNodeSize;
    9876  nextLevelNodeSize = currLevelNodeSize >> 1;
    9877  }
    9878  return level;
    9879 }
    9880 
/*
Frees the allocation occupying the node that contains the given offset:
descends the tree to locate the allocation node, marks it free, and merges
adjacent free buddies back up the tree as far as possible.
alloc may be VK_NULL_HANDLE per the assert below, in which case only the
offset identifies the node.
*/
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    // At each split node, go left or right depending on which half of the
    // current node's range contains the offset.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): alloc is dereferenced here even though the assert above
    // permits alloc == VK_NULL_HANDLE - confirm callers never pass null.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    // While the freed node's buddy is also free, collapse the pair back into
    // their parent and continue one level up.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        // Two free children merged into one free parent: net -1.
        --m_FreeCount;
    }

    // The (possibly merged) node becomes available at its final level.
    AddToFreeListFront(level, node);
}
    9931 
    9932 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    9933 {
    9934  switch(node->type)
    9935  {
    9936  case Node::TYPE_FREE:
    9937  ++outInfo.unusedRangeCount;
    9938  outInfo.unusedBytes += levelNodeSize;
    9939  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    9940  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
    9941  break;
    9942  case Node::TYPE_ALLOCATION:
    9943  {
    9944  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    9945  ++outInfo.allocationCount;
    9946  outInfo.usedBytes += allocSize;
    9947  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    9948  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
    9949 
    9950  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    9951  if(unusedRangeSize > 0)
    9952  {
    9953  ++outInfo.unusedRangeCount;
    9954  outInfo.unusedBytes += unusedRangeSize;
    9955  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9956  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9957  }
    9958  }
    9959  break;
    9960  case Node::TYPE_SPLIT:
    9961  {
    9962  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    9963  const Node* const leftChild = node->split.leftChild;
    9964  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    9965  const Node* const rightChild = leftChild->buddy;
    9966  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    9967  }
    9968  break;
    9969  default:
    9970  VMA_ASSERT(0);
    9971  }
    9972 }
    9973 
    9974 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    9975 {
    9976  VMA_ASSERT(node->type == Node::TYPE_FREE);
    9977 
    9978  // List is empty.
    9979  Node* const frontNode = m_FreeList[level].front;
    9980  if(frontNode == VMA_NULL)
    9981  {
    9982  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    9983  node->free.prev = node->free.next = VMA_NULL;
    9984  m_FreeList[level].front = m_FreeList[level].back = node;
    9985  }
    9986  else
    9987  {
    9988  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    9989  node->free.prev = VMA_NULL;
    9990  node->free.next = frontNode;
    9991  frontNode->free.prev = node;
    9992  m_FreeList[level].front = node;
    9993  }
    9994 }
    9995 
    9996 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    9997 {
    9998  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    9999 
    10000  // It is at the front.
    10001  if(node->free.prev == VMA_NULL)
    10002  {
    10003  VMA_ASSERT(m_FreeList[level].front == node);
    10004  m_FreeList[level].front = node->free.next;
    10005  }
    10006  else
    10007  {
    10008  Node* const prevFreeNode = node->free.prev;
    10009  VMA_ASSERT(prevFreeNode->free.next == node);
    10010  prevFreeNode->free.next = node->free.next;
    10011  }
    10012 
    10013  // It is at the back.
    10014  if(node->free.next == VMA_NULL)
    10015  {
    10016  VMA_ASSERT(m_FreeList[level].back == node);
    10017  m_FreeList[level].back = node->free.prev;
    10018  }
    10019  else
    10020  {
    10021  Node* const nextFreeNode = node->free.next;
    10022  VMA_ASSERT(nextFreeNode->free.prev == node);
    10023  nextFreeNode->free.prev = node->free.prev;
    10024  }
    10025 }
    10026 
    10027 #if VMA_STATS_STRING_ENABLED
    10028 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    10029 {
    10030  switch(node->type)
    10031  {
    10032  case Node::TYPE_FREE:
    10033  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    10034  break;
    10035  case Node::TYPE_ALLOCATION:
    10036  {
    10037  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    10038  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10039  if(allocSize < levelNodeSize)
    10040  {
    10041  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    10042  }
    10043  }
    10044  break;
    10045  case Node::TYPE_SPLIT:
    10046  {
    10047  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    10048  const Node* const leftChild = node->split.leftChild;
    10049  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    10050  const Node* const rightChild = leftChild->buddy;
    10051  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    10052  }
    10053  break;
    10054  default:
    10055  VMA_ASSERT(0);
    10056  }
    10057 }
    10058 #endif // #if VMA_STATS_STRING_ENABLED
    10059 
    10060 
    10062 // class VmaDeviceMemoryBlock
    10063 
// Constructs an uninitialized block; real setup happens in Init().
// The hAllocator parameter is unused in this constructor body.
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
    10073 
    10074 void VmaDeviceMemoryBlock::Init(
    10075  VmaAllocator hAllocator,
    10076  uint32_t newMemoryTypeIndex,
    10077  VkDeviceMemory newMemory,
    10078  VkDeviceSize newSize,
    10079  uint32_t id,
    10080  uint32_t algorithm)
    10081 {
    10082  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    10083 
    10084  m_MemoryTypeIndex = newMemoryTypeIndex;
    10085  m_Id = id;
    10086  m_hMemory = newMemory;
    10087 
    10088  switch(algorithm)
    10089  {
    10091  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    10092  break;
    10094  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    10095  break;
    10096  default:
    10097  VMA_ASSERT(0);
    10098  // Fall-through.
    10099  case 0:
    10100  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    10101  }
    10102  m_pMetadata->Init(newSize);
    10103 }
    10104 
// Releases the underlying VkDeviceMemory and destroys the metadata object.
// The block must be empty (all allocations freed) when this is called.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    // Size is read from metadata before it is deleted below.
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
    10118 
    10119 bool VmaDeviceMemoryBlock::Validate() const
    10120 {
    10121  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    10122  (m_pMetadata->GetSize() != 0));
    10123 
    10124  return m_pMetadata->Validate();
    10125 }
    10126 
    10127 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    10128 {
    10129  void* pData = nullptr;
    10130  VkResult res = Map(hAllocator, 1, &pData);
    10131  if(res != VK_SUCCESS)
    10132  {
    10133  return res;
    10134  }
    10135 
    10136  res = m_pMetadata->CheckCorruption(pData);
    10137 
    10138  Unmap(hAllocator, 1);
    10139 
    10140  return res;
    10141 }
    10142 
    10143 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    10144 {
    10145  if(count == 0)
    10146  {
    10147  return VK_SUCCESS;
    10148  }
    10149 
    10150  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10151  if(m_MapCount != 0)
    10152  {
    10153  m_MapCount += count;
    10154  VMA_ASSERT(m_pMappedData != VMA_NULL);
    10155  if(ppData != VMA_NULL)
    10156  {
    10157  *ppData = m_pMappedData;
    10158  }
    10159  return VK_SUCCESS;
    10160  }
    10161  else
    10162  {
    10163  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    10164  hAllocator->m_hDevice,
    10165  m_hMemory,
    10166  0, // offset
    10167  VK_WHOLE_SIZE,
    10168  0, // flags
    10169  &m_pMappedData);
    10170  if(result == VK_SUCCESS)
    10171  {
    10172  if(ppData != VMA_NULL)
    10173  {
    10174  *ppData = m_pMappedData;
    10175  }
    10176  m_MapCount = count;
    10177  }
    10178  return result;
    10179  }
    10180 }
    10181 
    10182 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    10183 {
    10184  if(count == 0)
    10185  {
    10186  return;
    10187  }
    10188 
    10189  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10190  if(m_MapCount >= count)
    10191  {
    10192  m_MapCount -= count;
    10193  if(m_MapCount == 0)
    10194  {
    10195  m_pMappedData = VMA_NULL;
    10196  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    10197  }
    10198  }
    10199  else
    10200  {
    10201  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    10202  }
    10203 }
    10204 
    10205 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10206 {
    10207  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10208  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10209 
    10210  void* pData;
    10211  VkResult res = Map(hAllocator, 1, &pData);
    10212  if(res != VK_SUCCESS)
    10213  {
    10214  return res;
    10215  }
    10216 
    10217  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    10218  VmaWriteMagicValue(pData, allocOffset + allocSize);
    10219 
    10220  Unmap(hAllocator, 1);
    10221 
    10222  return VK_SUCCESS;
    10223 }
    10224 
    10225 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    10226 {
    10227  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    10228  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    10229 
    10230  void* pData;
    10231  VkResult res = Map(hAllocator, 1, &pData);
    10232  if(res != VK_SUCCESS)
    10233  {
    10234  return res;
    10235  }
    10236 
    10237  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    10238  {
    10239  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    10240  }
    10241  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    10242  {
    10243  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    10244  }
    10245 
    10246  Unmap(hAllocator, 1);
    10247 
    10248  return VK_SUCCESS;
    10249 }
    10250 
    10251 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    10252  const VmaAllocator hAllocator,
    10253  const VmaAllocation hAllocation,
    10254  VkBuffer hBuffer)
    10255 {
    10256  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10257  hAllocation->GetBlock() == this);
    10258  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10259  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10260  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    10261  hAllocator->m_hDevice,
    10262  hBuffer,
    10263  m_hMemory,
    10264  hAllocation->GetOffset());
    10265 }
    10266 
    10267 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    10268  const VmaAllocator hAllocator,
    10269  const VmaAllocation hAllocation,
    10270  VkImage hImage)
    10271 {
    10272  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    10273  hAllocation->GetBlock() == this);
    10274  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    10275  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    10276  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    10277  hAllocator->m_hDevice,
    10278  hImage,
    10279  m_hMemory,
    10280  hAllocation->GetOffset());
    10281 }
    10282 
    10283 static void InitStatInfo(VmaStatInfo& outInfo)
    10284 {
    10285  memset(&outInfo, 0, sizeof(outInfo));
    10286  outInfo.allocationSizeMin = UINT64_MAX;
    10287  outInfo.unusedRangeSizeMin = UINT64_MAX;
    10288 }
    10289 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counters and byte totals are summed; Min/Max extremes are folded with
// VMA_MIN/VMA_MAX. Averages are not updated here - they are recomputed
// later by VmaPostprocessCalcStatInfo().
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
    10303 
    10304 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    10305 {
    10306  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    10307  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    10308  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    10309  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    10310 }
    10311 
// Custom pool: wraps a dedicated VmaBlockVector configured from createInfo.
// preferredBlockSize is used only when createInfo.blockSize == 0.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        // Zero blockSize means "use the allocator's preferred size".
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
    10330 
// Intentionally empty: the m_BlockVector member's destructor tears down all blocks.
VmaPool_T::~VmaPool_T()
{
}
    10334 
    10335 #if VMA_STATS_STRING_ENABLED
    10336 
    10337 #endif // #if VMA_STATS_STRING_ENABLED
    10338 
// Stores the configuration for a vector of memory blocks of one memory type.
// No blocks are created here; see CreateMinBlocks() / Allocate().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_pDefragmentator(VMA_NULL),
    m_NextBlockId(0)
{
}
    10366 
    10367 VmaBlockVector::~VmaBlockVector()
    10368 {
    10369  VMA_ASSERT(m_pDefragmentator == VMA_NULL);
    10370 
    10371  for(size_t i = m_Blocks.size(); i--; )
    10372  {
    10373  m_Blocks[i]->Destroy(m_hAllocator);
    10374  vma_delete(m_hAllocator, m_Blocks[i]);
    10375  }
    10376 }
    10377 
    10378 VkResult VmaBlockVector::CreateMinBlocks()
    10379 {
    10380  for(size_t i = 0; i < m_MinBlockCount; ++i)
    10381  {
    10382  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    10383  if(res != VK_SUCCESS)
    10384  {
    10385  return res;
    10386  }
    10387  }
    10388  return VK_SUCCESS;
    10389 }
    10390 
    10391 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    10392 {
    10393  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10394 
    10395  const size_t blockCount = m_Blocks.size();
    10396 
    10397  pStats->size = 0;
    10398  pStats->unusedSize = 0;
    10399  pStats->allocationCount = 0;
    10400  pStats->unusedRangeCount = 0;
    10401  pStats->unusedRangeSizeMax = 0;
    10402  pStats->blockCount = blockCount;
    10403 
    10404  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    10405  {
    10406  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    10407  VMA_ASSERT(pBlock);
    10408  VMA_HEAVY_ASSERT(pBlock->Validate());
    10409  pBlock->m_pMetadata->AddPoolStats(*pStats);
    10410  }
    10411 }
    10412 
    10413 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    10414 {
    10415  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    10416  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    10417  (VMA_DEBUG_MARGIN > 0) &&
    10418  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    10419 }
    10420 
    10421 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    10422 
    10423 VkResult VmaBlockVector::Allocate(
    10424  VmaPool hCurrentPool,
    10425  uint32_t currentFrameIndex,
    10426  VkDeviceSize size,
    10427  VkDeviceSize alignment,
    10428  const VmaAllocationCreateInfo& createInfo,
    10429  VmaSuballocationType suballocType,
    10430  VmaAllocation* pAllocation)
    10431 {
    10432  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    10433  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    10434  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    10435  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    10436  const bool canCreateNewBlock =
    10437  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    10438  (m_Blocks.size() < m_MaxBlockCount);
    10439  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    10440 
    10441  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    10442  // Which in turn is available only when maxBlockCount = 1.
    10443  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    10444  {
    10445  canMakeOtherLost = false;
    10446  }
    10447 
    10448  // Upper address can only be used with linear allocator and within single memory block.
    10449  if(isUpperAddress &&
    10450  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    10451  {
    10452  return VK_ERROR_FEATURE_NOT_PRESENT;
    10453  }
    10454 
    10455  // Validate strategy.
    10456  switch(strategy)
    10457  {
    10458  case 0:
    10460  break;
    10464  break;
    10465  default:
    10466  return VK_ERROR_FEATURE_NOT_PRESENT;
    10467  }
    10468 
    10469  // Early reject: requested allocation size is larger that maximum block size for this block vector.
    10470  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    10471  {
    10472  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10473  }
    10474 
    10475  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10476 
    10477  /*
    10478  Under certain condition, this whole section can be skipped for optimization, so
    10479  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    10480  e.g. for custom pools with linear algorithm.
    10481  */
    10482  if(!canMakeOtherLost || canCreateNewBlock)
    10483  {
    10484  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    10485  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    10487 
    10488  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10489  {
    10490  // Use only last block.
    10491  if(!m_Blocks.empty())
    10492  {
    10493  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    10494  VMA_ASSERT(pCurrBlock);
    10495  VkResult res = AllocateFromBlock(
    10496  pCurrBlock,
    10497  hCurrentPool,
    10498  currentFrameIndex,
    10499  size,
    10500  alignment,
    10501  allocFlagsCopy,
    10502  createInfo.pUserData,
    10503  suballocType,
    10504  strategy,
    10505  pAllocation);
    10506  if(res == VK_SUCCESS)
    10507  {
    10508  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    10509  return VK_SUCCESS;
    10510  }
    10511  }
    10512  }
    10513  else
    10514  {
    10516  {
    10517  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10518  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10519  {
    10520  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10521  VMA_ASSERT(pCurrBlock);
    10522  VkResult res = AllocateFromBlock(
    10523  pCurrBlock,
    10524  hCurrentPool,
    10525  currentFrameIndex,
    10526  size,
    10527  alignment,
    10528  allocFlagsCopy,
    10529  createInfo.pUserData,
    10530  suballocType,
    10531  strategy,
    10532  pAllocation);
    10533  if(res == VK_SUCCESS)
    10534  {
    10535  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10536  return VK_SUCCESS;
    10537  }
    10538  }
    10539  }
    10540  else // WORST_FIT, FIRST_FIT
    10541  {
    10542  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10543  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10544  {
    10545  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10546  VMA_ASSERT(pCurrBlock);
    10547  VkResult res = AllocateFromBlock(
    10548  pCurrBlock,
    10549  hCurrentPool,
    10550  currentFrameIndex,
    10551  size,
    10552  alignment,
    10553  allocFlagsCopy,
    10554  createInfo.pUserData,
    10555  suballocType,
    10556  strategy,
    10557  pAllocation);
    10558  if(res == VK_SUCCESS)
    10559  {
    10560  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    10561  return VK_SUCCESS;
    10562  }
    10563  }
    10564  }
    10565  }
    10566 
    10567  // 2. Try to create new block.
    10568  if(canCreateNewBlock)
    10569  {
    10570  // Calculate optimal size for new block.
    10571  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    10572  uint32_t newBlockSizeShift = 0;
    10573  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    10574 
    10575  if(!m_ExplicitBlockSize)
    10576  {
    10577  // Allocate 1/8, 1/4, 1/2 as first blocks.
    10578  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    10579  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    10580  {
    10581  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10582  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    10583  {
    10584  newBlockSize = smallerNewBlockSize;
    10585  ++newBlockSizeShift;
    10586  }
    10587  else
    10588  {
    10589  break;
    10590  }
    10591  }
    10592  }
    10593 
    10594  size_t newBlockIndex = 0;
    10595  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    10596  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    10597  if(!m_ExplicitBlockSize)
    10598  {
    10599  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    10600  {
    10601  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    10602  if(smallerNewBlockSize >= size)
    10603  {
    10604  newBlockSize = smallerNewBlockSize;
    10605  ++newBlockSizeShift;
    10606  res = CreateBlock(newBlockSize, &newBlockIndex);
    10607  }
    10608  else
    10609  {
    10610  break;
    10611  }
    10612  }
    10613  }
    10614 
    10615  if(res == VK_SUCCESS)
    10616  {
    10617  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    10618  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    10619 
    10620  res = AllocateFromBlock(
    10621  pBlock,
    10622  hCurrentPool,
    10623  currentFrameIndex,
    10624  size,
    10625  alignment,
    10626  allocFlagsCopy,
    10627  createInfo.pUserData,
    10628  suballocType,
    10629  strategy,
    10630  pAllocation);
    10631  if(res == VK_SUCCESS)
    10632  {
    10633  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    10634  return VK_SUCCESS;
    10635  }
    10636  else
    10637  {
    10638  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    10639  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10640  }
    10641  }
    10642  }
    10643  }
    10644 
    10645  // 3. Try to allocate from existing blocks with making other allocations lost.
    10646  if(canMakeOtherLost)
    10647  {
    10648  uint32_t tryIndex = 0;
    10649  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    10650  {
    10651  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    10652  VmaAllocationRequest bestRequest = {};
    10653  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    10654 
    10655  // 1. Search existing allocations.
    10657  {
    10658  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    10659  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    10660  {
    10661  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10662  VMA_ASSERT(pCurrBlock);
    10663  VmaAllocationRequest currRequest = {};
    10664  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10665  currentFrameIndex,
    10666  m_FrameInUseCount,
    10667  m_BufferImageGranularity,
    10668  size,
    10669  alignment,
    10670  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10671  suballocType,
    10672  canMakeOtherLost,
    10673  strategy,
    10674  &currRequest))
    10675  {
    10676  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10677  if(pBestRequestBlock == VMA_NULL ||
    10678  currRequestCost < bestRequestCost)
    10679  {
    10680  pBestRequestBlock = pCurrBlock;
    10681  bestRequest = currRequest;
    10682  bestRequestCost = currRequestCost;
    10683 
    10684  if(bestRequestCost == 0)
    10685  {
    10686  break;
    10687  }
    10688  }
    10689  }
    10690  }
    10691  }
    10692  else // WORST_FIT, FIRST_FIT
    10693  {
    10694  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    10695  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    10696  {
    10697  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    10698  VMA_ASSERT(pCurrBlock);
    10699  VmaAllocationRequest currRequest = {};
    10700  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    10701  currentFrameIndex,
    10702  m_FrameInUseCount,
    10703  m_BufferImageGranularity,
    10704  size,
    10705  alignment,
    10706  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    10707  suballocType,
    10708  canMakeOtherLost,
    10709  strategy,
    10710  &currRequest))
    10711  {
    10712  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    10713  if(pBestRequestBlock == VMA_NULL ||
    10714  currRequestCost < bestRequestCost ||
    10716  {
    10717  pBestRequestBlock = pCurrBlock;
    10718  bestRequest = currRequest;
    10719  bestRequestCost = currRequestCost;
    10720 
    10721  if(bestRequestCost == 0 ||
    10723  {
    10724  break;
    10725  }
    10726  }
    10727  }
    10728  }
    10729  }
    10730 
    10731  if(pBestRequestBlock != VMA_NULL)
    10732  {
    10733  if(mapped)
    10734  {
    10735  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    10736  if(res != VK_SUCCESS)
    10737  {
    10738  return res;
    10739  }
    10740  }
    10741 
    10742  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    10743  currentFrameIndex,
    10744  m_FrameInUseCount,
    10745  &bestRequest))
    10746  {
    10747  // We no longer have an empty Allocation.
    10748  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    10749  {
    10750  m_HasEmptyBlock = false;
    10751  }
    10752  // Allocate from this pBlock.
    10753  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
    10754  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
    10755  (*pAllocation)->InitBlockAllocation(
    10756  hCurrentPool,
    10757  pBestRequestBlock,
    10758  bestRequest.offset,
    10759  alignment,
    10760  size,
    10761  suballocType,
    10762  mapped,
    10763  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    10764  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    10765  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
    10766  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    10767  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    10768  {
    10769  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    10770  }
    10771  if(IsCorruptionDetectionEnabled())
    10772  {
    10773  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    10774  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    10775  }
    10776  return VK_SUCCESS;
    10777  }
    10778  // else: Some allocations must have been touched while we are here. Next try.
    10779  }
    10780  else
    10781  {
    10782  // Could not find place in any of the blocks - break outer loop.
    10783  break;
    10784  }
    10785  }
    10786  /* Maximum number of tries exceeded - a very unlike event when many other
    10787  threads are simultaneously touching allocations making it impossible to make
    10788  lost at the same time as we try to allocate. */
    10789  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    10790  {
    10791  return VK_ERROR_TOO_MANY_OBJECTS;
    10792  }
    10793  }
    10794 
    10795  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    10796 }
    10797 
    10798 void VmaBlockVector::Free(
    10799  VmaAllocation hAllocation)
    10800 {
    10801  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    10802 
    10803  // Scope for lock.
    10804  {
    10805  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    10806 
    10807  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    10808 
    10809  if(IsCorruptionDetectionEnabled())
    10810  {
    10811  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    10812  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    10813  }
    10814 
    10815  if(hAllocation->IsPersistentMap())
    10816  {
    10817  pBlock->Unmap(m_hAllocator, 1);
    10818  }
    10819 
    10820  pBlock->m_pMetadata->Free(hAllocation);
    10821  VMA_HEAVY_ASSERT(pBlock->Validate());
    10822 
    10823  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
    10824 
    10825  // pBlock became empty after this deallocation.
    10826  if(pBlock->m_pMetadata->IsEmpty())
    10827  {
    10828  // Already has empty Allocation. We don't want to have two, so delete this one.
    10829  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    10830  {
    10831  pBlockToDelete = pBlock;
    10832  Remove(pBlock);
    10833  }
    10834  // We now have first empty block.
    10835  else
    10836  {
    10837  m_HasEmptyBlock = true;
    10838  }
    10839  }
    10840  // pBlock didn't become empty, but we have another empty block - find and free that one.
    10841  // (This is optional, heuristics.)
    10842  else if(m_HasEmptyBlock)
    10843  {
    10844  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    10845  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    10846  {
    10847  pBlockToDelete = pLastBlock;
    10848  m_Blocks.pop_back();
    10849  m_HasEmptyBlock = false;
    10850  }
    10851  }
    10852 
    10853  IncrementallySortBlocks();
    10854  }
    10855 
    10856  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    10857  // lock, for performance reason.
    10858  if(pBlockToDelete != VMA_NULL)
    10859  {
    10860  VMA_DEBUG_LOG(" Deleted empty allocation");
    10861  pBlockToDelete->Destroy(m_hAllocator);
    10862  vma_delete(m_hAllocator, pBlockToDelete);
    10863  }
    10864 }
    10865 
    10866 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    10867 {
    10868  VkDeviceSize result = 0;
    10869  for(size_t i = m_Blocks.size(); i--; )
    10870  {
    10871  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    10872  if(result >= m_PreferredBlockSize)
    10873  {
    10874  break;
    10875  }
    10876  }
    10877  return result;
    10878 }
    10879 
    10880 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    10881 {
    10882  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    10883  {
    10884  if(m_Blocks[blockIndex] == pBlock)
    10885  {
    10886  VmaVectorRemove(m_Blocks, blockIndex);
    10887  return;
    10888  }
    10889  }
    10890  VMA_ASSERT(0);
    10891 }
    10892 
    10893 void VmaBlockVector::IncrementallySortBlocks()
    10894 {
    10895  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    10896  {
    10897  // Bubble sort only until first swap.
    10898  for(size_t i = 1; i < m_Blocks.size(); ++i)
    10899  {
    10900  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    10901  {
    10902  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    10903  return;
    10904  }
    10905  }
    10906  }
    10907 }
    10908 
// Tries to carve an allocation of `size`/`alignment` out of one specific
// existing block. On success fills *pAllocation and returns VK_SUCCESS; if the
// block has no suitable free range, returns VK_ERROR_OUT_OF_DEVICE_MEMORY with
// no side effects. Caller is expected to hold m_Mutex.
// This path never sacrifices other allocations (canMakeOtherLost is always
// false here) - asserted on entry.
VkResult VmaBlockVector::AllocateFromBlock(
    VmaDeviceMemoryBlock* pBlock,
    VmaPool hCurrentPool,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    VmaAllocationCreateFlags allocFlags,
    void* pUserData,
    VmaSuballocationType suballocType,
    uint32_t strategy,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;

    VmaAllocationRequest currRequest = {};
    if(pBlock->m_pMetadata->CreateAllocationRequest(
        currentFrameIndex,
        m_FrameInUseCount,
        m_BufferImageGranularity,
        size,
        alignment,
        isUpperAddress,
        suballocType,
        false, // canMakeOtherLost
        strategy,
        &currRequest))
    {
        // Allocate from pCurrBlock.
        VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);

        if(mapped)
        {
            // Increase the block's map reference count before committing the
            // allocation, so a failed map leaves the block untouched.
            VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
            if(res != VK_SUCCESS)
            {
                return res;
            }
        }

        // We no longer have an empty Allocation.
        if(pBlock->m_pMetadata->IsEmpty())
        {
            m_HasEmptyBlock = false;
        }

        *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
        pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
        (*pAllocation)->InitBlockAllocation(
            hCurrentPool,
            pBlock,
            currRequest.offset,
            alignment,
            size,
            suballocType,
            mapped,
            (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
        if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
        {
            // Fill freshly allocated memory with a recognizable pattern to
            // surface use-of-uninitialized-memory bugs.
            m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
        }
        if(IsCorruptionDetectionEnabled())
        {
            // Write guard bytes around the allocation for later validation.
            VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
        }
        return VK_SUCCESS;
    }
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
    10983 
    10984 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    10985 {
    10986  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    10987  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    10988  allocInfo.allocationSize = blockSize;
    10989  VkDeviceMemory mem = VK_NULL_HANDLE;
    10990  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    10991  if(res < 0)
    10992  {
    10993  return res;
    10994  }
    10995 
    10996  // New VkDeviceMemory successfully created.
    10997 
    10998  // Create new Allocation for it.
    10999  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    11000  pBlock->Init(
    11001  m_hAllocator,
    11002  m_MemoryTypeIndex,
    11003  mem,
    11004  allocInfo.allocationSize,
    11005  m_NextBlockId++,
    11006  m_Algorithm);
    11007 
    11008  m_Blocks.push_back(pBlock);
    11009  if(pNewBlockIndex != VMA_NULL)
    11010  {
    11011  *pNewBlockIndex = m_Blocks.size() - 1;
    11012  }
    11013 
    11014  return VK_SUCCESS;
    11015 }
    11016 
    11017 #if VMA_STATS_STRING_ENABLED
    11018 
// Serializes this block vector's configuration and per-block detailed maps
// into the given JSON writer, under one JSON object. Custom pools emit their
// full configuration (memory type, block size, block count limits, frame-in-use
// count, algorithm); default pools emit only the preferred block size.
// Takes m_Mutex for the duration of serialization.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        // "BlockCount" is a nested object: optional Min/Max limits plus the
        // current count.
        json.WriteString("BlockCount");
        json.BeginObject(true);
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    // "Blocks" maps each block's numeric id to its detailed metadata dump.
    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}
    11081 
    11082 #endif // #if VMA_STATS_STRING_ENABLED
    11083 
    11084 VmaDefragmentator* VmaBlockVector::EnsureDefragmentator(
    11085  VmaAllocator hAllocator,
    11086  uint32_t currentFrameIndex)
    11087 {
    11088  if(m_pDefragmentator == VMA_NULL)
    11089  {
    11090  m_pDefragmentator = vma_new(m_hAllocator, VmaDefragmentator)(
    11091  hAllocator,
    11092  this,
    11093  currentFrameIndex);
    11094  }
    11095 
    11096  return m_pDefragmentator;
    11097 }
    11098 
// Runs the defragmentator (if one was created) under m_Mutex, accumulates the
// moved bytes/allocations into pDefragmentationStats, decrements the caller's
// remaining budgets (maxBytesToMove / maxAllocationsToMove, passed by
// reference), and frees blocks that became empty, down to m_MinBlockCount.
// Returns VK_SUCCESS when there is nothing to do.
VkResult VmaBlockVector::Defragment(
    VmaDefragmentationStats* pDefragmentationStats,
    VkDeviceSize& maxBytesToMove,
    uint32_t& maxAllocationsToMove)
{
    if(m_pDefragmentator == VMA_NULL)
    {
        return VK_SUCCESS;
    }

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    // Defragment.
    VkResult result = m_pDefragmentator->Defragment(maxBytesToMove, maxAllocationsToMove);

    // Accumulate statistics.
    if(pDefragmentationStats != VMA_NULL)
    {
        const VkDeviceSize bytesMoved = m_pDefragmentator->GetBytesMoved();
        const uint32_t allocationsMoved = m_pDefragmentator->GetAllocationsMoved();
        pDefragmentationStats->bytesMoved += bytesMoved;
        pDefragmentationStats->allocationsMoved += allocationsMoved;
        // The defragmentator must respect the budgets it was given.
        VMA_ASSERT(bytesMoved <= maxBytesToMove);
        VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
        maxBytesToMove -= bytesMoved;
        maxAllocationsToMove -= allocationsMoved;
    }

    // Free empty blocks.
    // Iterate backward so VmaVectorRemove does not disturb indices yet to be
    // visited. m_HasEmptyBlock is recomputed from scratch here.
    m_HasEmptyBlock = false;
    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    {
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if(pBlock->m_pMetadata->IsEmpty())
        {
            if(m_Blocks.size() > m_MinBlockCount)
            {
                if(pDefragmentationStats != VMA_NULL)
                {
                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
                }

                VmaVectorRemove(m_Blocks, blockIndex);
                pBlock->Destroy(m_hAllocator);
                vma_delete(m_hAllocator, pBlock);
            }
            else
            {
                // Must keep this block to honor m_MinBlockCount.
                m_HasEmptyBlock = true;
            }
        }
    }

    return result;
}
    11155 
    11156 void VmaBlockVector::DestroyDefragmentator()
    11157 {
    11158  if(m_pDefragmentator != VMA_NULL)
    11159  {
    11160  vma_delete(m_hAllocator, m_pDefragmentator);
    11161  m_pDefragmentator = VMA_NULL;
    11162  }
    11163 }
    11164 
    11165 void VmaBlockVector::MakePoolAllocationsLost(
    11166  uint32_t currentFrameIndex,
    11167  size_t* pLostAllocationCount)
    11168 {
    11169  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11170  size_t lostAllocationCount = 0;
    11171  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11172  {
    11173  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11174  VMA_ASSERT(pBlock);
    11175  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    11176  }
    11177  if(pLostAllocationCount != VMA_NULL)
    11178  {
    11179  *pLostAllocationCount = lostAllocationCount;
    11180  }
    11181 }
    11182 
    11183 VkResult VmaBlockVector::CheckCorruption()
    11184 {
    11185  if(!IsCorruptionDetectionEnabled())
    11186  {
    11187  return VK_ERROR_FEATURE_NOT_PRESENT;
    11188  }
    11189 
    11190  VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);
    11191  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11192  {
    11193  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11194  VMA_ASSERT(pBlock);
    11195  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    11196  if(res != VK_SUCCESS)
    11197  {
    11198  return res;
    11199  }
    11200  }
    11201  return VK_SUCCESS;
    11202 }
    11203 
// Accumulates per-block allocation statistics of this vector into *pStats:
// the global total, the per-memory-type bucket, and the per-heap bucket.
// Takes m_Mutex while iterating the blocks.
void VmaBlockVector::AddStats(VmaStats* pStats)
{
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLock lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        VmaStatInfo allocationStatInfo;
        pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
        // The same per-block info feeds all three aggregation levels.
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}
    11223 
////////////////////////////////////////////////////////////////////////////////
// VmaDefragmentator members definition
    11226 
// Constructs a defragmentator bound to one block vector.
// Internal vectors use the allocator's custom allocation callbacks.
VmaDefragmentator::VmaDefragmentator(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex) :
    m_hAllocator(hAllocator),
    m_pBlockVector(pBlockVector),
    m_CurrentFrameIndex(currentFrameIndex),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Allocations(VmaStlAllocator<AllocationInfo>(hAllocator->GetAllocationCallbacks())),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Defragmentation only supports the default metadata algorithm
    // (not linear/buddy).
    VMA_ASSERT(pBlockVector->GetAlgorithm() == 0);
}
    11241 
    11242 VmaDefragmentator::~VmaDefragmentator()
    11243 {
    11244  for(size_t i = m_Blocks.size(); i--; )
    11245  {
    11246  vma_delete(m_hAllocator, m_Blocks[i]);
    11247  }
    11248 }
    11249 
    11250 void VmaDefragmentator::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    11251 {
    11252  AllocationInfo allocInfo;
    11253  allocInfo.m_hAllocation = hAlloc;
    11254  allocInfo.m_pChanged = pChanged;
    11255  m_Allocations.push_back(allocInfo);
    11256 }
    11257 
    11258 VkResult VmaDefragmentator::BlockInfo::EnsureMapping(VmaAllocator hAllocator, void** ppMappedData)
    11259 {
    11260  // It has already been mapped for defragmentation.
    11261  if(m_pMappedDataForDefragmentation)
    11262  {
    11263  *ppMappedData = m_pMappedDataForDefragmentation;
    11264  return VK_SUCCESS;
    11265  }
    11266 
    11267  // It is originally mapped.
    11268  if(m_pBlock->GetMappedData())
    11269  {
    11270  *ppMappedData = m_pBlock->GetMappedData();
    11271  return VK_SUCCESS;
    11272  }
    11273 
    11274  // Map on first usage.
    11275  VkResult res = m_pBlock->Map(hAllocator, 1, &m_pMappedDataForDefragmentation);
    11276  *ppMappedData = m_pMappedDataForDefragmentation;
    11277  return res;
    11278 }
    11279 
    11280 void VmaDefragmentator::BlockInfo::Unmap(VmaAllocator hAllocator)
    11281 {
    11282  if(m_pMappedDataForDefragmentation != VMA_NULL)
    11283  {
    11284  m_pBlock->Unmap(hAllocator, 1);
    11285  }
    11286 }
    11287 
    11288 VkResult VmaDefragmentator::DefragmentRound(
    11289  VkDeviceSize maxBytesToMove,
    11290  uint32_t maxAllocationsToMove)
    11291 {
    11292  if(m_Blocks.empty())
    11293  {
    11294  return VK_SUCCESS;
    11295  }
    11296 
    11297  size_t srcBlockIndex = m_Blocks.size() - 1;
    11298  size_t srcAllocIndex = SIZE_MAX;
    11299  for(;;)
    11300  {
    11301  // 1. Find next allocation to move.
    11302  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    11303  // 1.2. Then start from last to first m_Allocations - they are sorted from largest to smallest.
    11304  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    11305  {
    11306  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    11307  {
    11308  // Finished: no more allocations to process.
    11309  if(srcBlockIndex == 0)
    11310  {
    11311  return VK_SUCCESS;
    11312  }
    11313  else
    11314  {
    11315  --srcBlockIndex;
    11316  srcAllocIndex = SIZE_MAX;
    11317  }
    11318  }
    11319  else
    11320  {
    11321  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    11322  }
    11323  }
    11324 
    11325  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    11326  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    11327 
    11328  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    11329  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    11330  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    11331  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    11332 
    11333  // 2. Try to find new place for this allocation in preceding or current block.
    11334  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    11335  {
    11336  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    11337  VmaAllocationRequest dstAllocRequest;
    11338  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    11339  m_CurrentFrameIndex,
    11340  m_pBlockVector->GetFrameInUseCount(),
    11341  m_pBlockVector->GetBufferImageGranularity(),
    11342  size,
    11343  alignment,
    11344  false, // upperAddress
    11345  suballocType,
    11346  false, // canMakeOtherLost
    11348  &dstAllocRequest) &&
    11349  MoveMakesSense(
    11350  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    11351  {
    11352  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    11353 
    11354  // Reached limit on number of allocations or bytes to move.
    11355  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    11356  (m_BytesMoved + size > maxBytesToMove))
    11357  {
    11358  return VK_INCOMPLETE;
    11359  }
    11360 
    11361  void* pDstMappedData = VMA_NULL;
    11362  VkResult res = pDstBlockInfo->EnsureMapping(m_hAllocator, &pDstMappedData);
    11363  if(res != VK_SUCCESS)
    11364  {
    11365  return res;
    11366  }
    11367 
    11368  void* pSrcMappedData = VMA_NULL;
    11369  res = pSrcBlockInfo->EnsureMapping(m_hAllocator, &pSrcMappedData);
    11370  if(res != VK_SUCCESS)
    11371  {
    11372  return res;
    11373  }
    11374 
    11375  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    11376  memcpy(
    11377  reinterpret_cast<char*>(pDstMappedData) + dstAllocRequest.offset,
    11378  reinterpret_cast<char*>(pSrcMappedData) + srcOffset,
    11379  static_cast<size_t>(size));
    11380 
    11381  if(VMA_DEBUG_MARGIN > 0)
    11382  {
    11383  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset - VMA_DEBUG_MARGIN);
    11384  VmaWriteMagicValue(pDstMappedData, dstAllocRequest.offset + size);
    11385  }
    11386 
    11387  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    11388  dstAllocRequest,
    11389  suballocType,
    11390  size,
    11391  false, // upperAddress
    11392  allocInfo.m_hAllocation);
    11393  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    11394 
    11395  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    11396 
    11397  if(allocInfo.m_pChanged != VMA_NULL)
    11398  {
    11399  *allocInfo.m_pChanged = VK_TRUE;
    11400  }
    11401 
    11402  ++m_AllocationsMoved;
    11403  m_BytesMoved += size;
    11404 
    11405  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    11406 
    11407  break;
    11408  }
    11409  }
    11410 
    11411  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    11412 
    11413  if(srcAllocIndex > 0)
    11414  {
    11415  --srcAllocIndex;
    11416  }
    11417  else
    11418  {
    11419  if(srcBlockIndex > 0)
    11420  {
    11421  --srcBlockIndex;
    11422  srcAllocIndex = SIZE_MAX;
    11423  }
    11424  else
    11425  {
    11426  return VK_SUCCESS;
    11427  }
    11428  }
    11429  }
    11430 }
    11431 
// Entry point of a defragmentation run. Builds per-block bookkeeping, buckets
// the registered allocations into their owning blocks, orders blocks from most
// "destination" to most "source", runs up to two DefragmentRound passes within
// the given budgets, then unmaps any memory mapped just for defragmentation.
// Caller holds the owning VmaBlockVector's mutex.
VkResult VmaDefragmentator::Defragment(
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Allocations.empty())
    {
        return VK_SUCCESS;
    }

    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    // Enables the binary search by block pointer below.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());

    // Move allocation infos from m_Allocations to appropriate m_Blocks[memTypeIndex].m_Allocations.
    for(size_t blockIndex = 0, allocCount = m_Allocations.size(); blockIndex < allocCount; ++blockIndex)
    {
        AllocationInfo& allocInfo = m_Allocations[blockIndex];
        // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
        if(allocInfo.m_hAllocation->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
        {
            VmaDeviceMemoryBlock* pBlock = allocInfo.m_hAllocation->GetBlock();
            BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
            if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
            {
                (*it)->m_Allocations.push_back(allocInfo);
            }
            else
            {
                // Every registered allocation must belong to one of our blocks.
                VMA_ASSERT(0);
            }
        }
    }
    m_Allocations.clear();

    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
        pBlockInfo->CalcHasNonMovableAllocations();
        pBlockInfo->SortAllocationsBySizeDescecnding();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(size_t round = 0; (round < 2) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(maxBytesToMove, maxAllocationsToMove);
    }

    // Unmap blocks that were mapped for defragmentation.
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        m_Blocks[blockIndex]->Unmap(m_hAllocator);
    }

    return result;
}
    11499 
    11500 bool VmaDefragmentator::MoveMakesSense(
    11501  size_t dstBlockIndex, VkDeviceSize dstOffset,
    11502  size_t srcBlockIndex, VkDeviceSize srcOffset)
    11503 {
    11504  if(dstBlockIndex < srcBlockIndex)
    11505  {
    11506  return true;
    11507  }
    11508  if(dstBlockIndex > srcBlockIndex)
    11509  {
    11510  return false;
    11511  }
    11512  if(dstOffset < srcOffset)
    11513  {
    11514  return true;
    11515  }
    11516  return false;
    11517 }
    11518 
    11520 // VmaRecorder
    11521 
    11522 #if VMA_RECORDING_ENABLED
    11523 
// Default-constructs the recorder in a closed, unusable state.
// Init() must succeed before any Record* method is called.
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),      // No file open yet; destructor checks this.
    m_Freq(INT64_MAX),     // QPC frequency, set in Init().
    m_StartCounter(INT64_MAX) // QPC value at Init(); times are relative to it.
{
}
    11532 
// Opens the recording file and writes the CSV header.
// settings - flags and output file path (from VmaAllocatorCreateInfo::pRecordSettings).
// useMutex - whether Record* calls are serialized with m_FileMutex.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture the performance-counter frequency and a start timestamp so call
    // times can later be reported in seconds relative to Init().
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file identifier line, then format version "major,minor".
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,4");

    return VK_SUCCESS;
}
    11554 
// Closes the recording file if Init() opened one.
VmaRecorder::~VmaRecorder()
{
    if(m_File != VMA_NULL)
    {
        fclose(m_File);
    }
}
    11562 
// Writes one CSV line recording a vmaCreateAllocator() call.
void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11572 
// Writes one CSV line recording a vmaDestroyAllocator() call.
void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    Flush();
}
    11582 
// Writes one CSV line recording a vmaCreatePool() call, including the full
// VmaPoolCreateInfo and the resulting pool handle.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        // size_t block counts are widened to 64-bit to match the %llu format.
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
    11599 
// Writes one CSV line recording a vmaDestroyPool() call.
void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11610 
// Writes one CSV line recording a vmaAllocateMemory() call: the Vulkan memory
// requirements, the VmaAllocationCreateInfo, the resulting allocation handle,
// and the (possibly copied) user-data string.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11635 
// Writes one CSV line recording a vmaAllocateMemoryForBuffer() call.
// Like RecordAllocateMemory, plus the two dedicated-allocation hints
// (encoded as 0/1) reported for the buffer.
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11664 
// Writes one CSV line recording a vmaAllocateMemoryForImage() call.
// Same layout as RecordAllocateMemoryForBuffer, but tagged vmaAllocateMemoryForImage.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11693 
// Writes one CSV line recording a vmaFreeMemory() call.
void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11705 
// Writes one CSV line recording a vmaResizeAllocation() call with the
// requested new size in bytes.
void VmaRecorder::RecordResizeAllocation(
    uint32_t frameIndex,
    VmaAllocation allocation,
    VkDeviceSize newSize)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation, newSize);
    Flush();
}
    11719 
// Writes one CSV line recording a vmaSetAllocationUserData() call.
// If the allocation stores user data as a copied string, pUserData is logged
// as text; otherwise it is logged as a pointer value.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11736 
// Writes one CSV line recording a vmaCreateLostAllocation() call.
void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11748 
// Writes one CSV line recording a vmaMapMemory() call.
void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11760 
// Writes one CSV line recording a vmaUnmapMemory() call.
void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11772 
// Writes one CSV line recording a vmaFlushAllocation() call with the flushed
// byte range (offset, size).
void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11786 
// Writes one CSV line recording a vmaInvalidateAllocation() call with the
// invalidated byte range (offset, size).
void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        offset,
        size);
    Flush();
}
    11800 
// Writes one CSV line recording a vmaCreateBuffer() call: the
// VkBufferCreateInfo fields, the VmaAllocationCreateInfo fields, the
// resulting allocation handle, and the user-data string.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11826 
// Writes one CSV line recording a vmaCreateImage() call: the full
// VkImageCreateInfo, the VmaAllocationCreateInfo fields, the resulting
// allocation handle, and the user-data string.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
    11861 
// Writes one CSV line recording a vmaDestroyBuffer() call.
void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11873 
// Writes one CSV line recording a vmaDestroyImage() call.
void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11885 
// Writes one CSV line recording a vmaTouchAllocation() call.
void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11897 
// Writes one CSV line recording a vmaGetAllocationInfo() call.
void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
        allocation);
    Flush();
}
    11909 
// Writes one CSV line recording a vmaMakePoolAllocationsLost() call.
void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
        pool);
    Flush();
}
    11921 
// Converts an allocation's pUserData into a printable string for the CSV log.
// - If the COPY_STRING flag is set, pUserData is treated as a null-terminated
//   string and referenced directly (not copied; must outlive this object).
// - Otherwise the pointer value is formatted into the internal buffer.
// - A null pointer yields an empty string.
VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
{
    if(pUserData != VMA_NULL)
    {
        if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
        {
            m_Str = (const char*)pUserData;
        }
        else
        {
            sprintf_s(m_PtrStr, "%p", pUserData);
            m_Str = m_PtrStr;
        }
    }
    else
    {
        m_Str = "";
    }
}
    11941 
// Writes the "Config,Begin" ... "Config,End" section of the recording file:
// physical-device identity and limits, the full memory heap/type layout,
// enabled extensions, and the compile-time VMA_* debug macros. This lets the
// playback tool reproduce the environment the recording was made in.
// Called once from VmaAllocator_T::Init(), before any calls are recorded.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Only the limits VMA's allocation logic depends on are recorded.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time configuration of this VMA build.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
    11987 
// Fills the per-call parameters written at the start of every CSV line:
// the calling thread's id and the time in seconds since Init().
void VmaRecorder::GetBasicParams(CallParams& outParams)
{
    outParams.threadId = GetCurrentThreadId();

    // Elapsed seconds = (current QPC ticks - start ticks) / ticks-per-second.
    LARGE_INTEGER counter;
    QueryPerformanceCounter(&counter);
    outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
}
    11996 
    11997 void VmaRecorder::Flush()
    11998 {
    11999  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    12000  {
    12001  fflush(m_File);
    12002  }
    12003 }
    12004 
    12005 #endif // #if VMA_RECORDING_ENABLED
    12006 
    12008 // VmaAllocator_T
    12009 
    12010 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    12011  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    12012  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    12013  m_hDevice(pCreateInfo->device),
    12014  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    12015  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    12016  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    12017  m_PreferredLargeHeapBlockSize(0),
    12018  m_PhysicalDevice(pCreateInfo->physicalDevice),
    12019  m_CurrentFrameIndex(0),
    12020  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    12021  m_NextPoolId(0)
    12023  ,m_pRecorder(VMA_NULL)
    12024 #endif
    12025 {
    12026  if(VMA_DEBUG_DETECT_CORRUPTION)
    12027  {
    12028  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    12029  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    12030  }
    12031 
    12032  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    12033 
    12034 #if !(VMA_DEDICATED_ALLOCATION)
    12036  {
    12037  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    12038  }
    12039 #endif
    12040 
    12041  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
    12042  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    12043  memset(&m_MemProps, 0, sizeof(m_MemProps));
    12044 
    12045  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    12046  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    12047 
    12048  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12049  {
    12050  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    12051  }
    12052 
    12053  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    12054  {
    12055  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    12056  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    12057  }
    12058 
    12059  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    12060 
    12061  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    12062  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    12063 
    12064  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    12065  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    12066  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    12067  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    12068 
    12069  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    12070  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    12071 
    12072  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    12073  {
    12074  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    12075  {
    12076  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    12077  if(limit != VK_WHOLE_SIZE)
    12078  {
    12079  m_HeapSizeLimit[heapIndex] = limit;
    12080  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    12081  {
    12082  m_MemProps.memoryHeaps[heapIndex].size = limit;
    12083  }
    12084  }
    12085  }
    12086  }
    12087 
    12088  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12089  {
    12090  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    12091 
    12092  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    12093  this,
    12094  memTypeIndex,
    12095  preferredBlockSize,
    12096  0,
    12097  SIZE_MAX,
    12098  GetBufferImageGranularity(),
    12099  pCreateInfo->frameInUseCount,
    12100  false, // isCustomPool
    12101  false, // explicitBlockSize
    12102  false); // linearAlgorithm
    12103  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
    12104  // becase minBlockCount is 0.
    12105  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    12106 
    12107  }
    12108 }
    12109 
// Second-phase initialization, separate from the constructor so it can
// report a VkResult. If call recording was requested, creates the recorder,
// opens its file, and writes the configuration section.
// Returns VK_ERROR_FEATURE_NOT_PRESENT when recording is requested but this
// build has VMA_RECORDING_ENABLED disabled.
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            return res;
        }
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
    12137 
// Destroys the allocator: records and deletes the recorder (if any), then
// frees the per-memory-type dedicated-allocation lists and block vectors.
// All custom pools must have been destroyed by the caller beforehand.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    // Delete in reverse memory-type order.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
    12156 
// Populates m_VulkanFunctions. With VMA_STATIC_VULKAN_FUNCTIONS the statically
// linked entry points are used as defaults; any non-null pointers in
// pVulkanFunctions then override them. Finally asserts that every required
// pointer ended up non-null.
// pVulkanFunctions may be VMA_NULL, meaning "use only the defaults".
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
#if VMA_DEDICATED_ALLOCATION
    // Extension entry points are not exported statically; fetch them from the
    // device only when the extension is actually in use.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-supplied function pointer over the default, if provided.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
    12242 
    12243 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    12244 {
    12245  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12246  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    12247  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    12248  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    12249 }
    12250 
    12251 VkResult VmaAllocator_T::AllocateMemoryOfType(
    12252  VkDeviceSize size,
    12253  VkDeviceSize alignment,
    12254  bool dedicatedAllocation,
    12255  VkBuffer dedicatedBuffer,
    12256  VkImage dedicatedImage,
    12257  const VmaAllocationCreateInfo& createInfo,
    12258  uint32_t memTypeIndex,
    12259  VmaSuballocationType suballocType,
    12260  VmaAllocation* pAllocation)
    12261 {
    12262  VMA_ASSERT(pAllocation != VMA_NULL);
    12263  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
    12264 
    12265  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    12266 
    12267  // If memory type is not HOST_VISIBLE, disable MAPPED.
    12268  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12269  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    12270  {
    12271  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    12272  }
    12273 
    12274  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    12275  VMA_ASSERT(blockVector);
    12276 
    12277  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    12278  bool preferDedicatedMemory =
    12279  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    12280  dedicatedAllocation ||
    12281  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
    12282  size > preferredBlockSize / 2;
    12283 
    12284  if(preferDedicatedMemory &&
    12285  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    12286  finalCreateInfo.pool == VK_NULL_HANDLE)
    12287  {
    12289  }
    12290 
    12291  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    12292  {
    12293  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12294  {
    12295  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12296  }
    12297  else
    12298  {
    12299  return AllocateDedicatedMemory(
    12300  size,
    12301  suballocType,
    12302  memTypeIndex,
    12303  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12304  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12305  finalCreateInfo.pUserData,
    12306  dedicatedBuffer,
    12307  dedicatedImage,
    12308  pAllocation);
    12309  }
    12310  }
    12311  else
    12312  {
    12313  VkResult res = blockVector->Allocate(
    12314  VK_NULL_HANDLE, // hCurrentPool
    12315  m_CurrentFrameIndex.load(),
    12316  size,
    12317  alignment,
    12318  finalCreateInfo,
    12319  suballocType,
    12320  pAllocation);
    12321  if(res == VK_SUCCESS)
    12322  {
    12323  return res;
    12324  }
    12325 
    12326  // 5. Try dedicated memory.
    12327  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12328  {
    12329  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12330  }
    12331  else
    12332  {
    12333  res = AllocateDedicatedMemory(
    12334  size,
    12335  suballocType,
    12336  memTypeIndex,
    12337  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    12338  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    12339  finalCreateInfo.pUserData,
    12340  dedicatedBuffer,
    12341  dedicatedImage,
    12342  pAllocation);
    12343  if(res == VK_SUCCESS)
    12344  {
    12345  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
    12346  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    12347  return VK_SUCCESS;
    12348  }
    12349  else
    12350  {
    12351  // Everything failed: Return error code.
    12352  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    12353  return res;
    12354  }
    12355  }
    12356  }
    12357 }
    12358 
/*
Allocates a whole, dedicated VkDeviceMemory block for a single allocation.

Steps, in order: build VkMemoryAllocateInfo (optionally chaining
VkMemoryDedicatedAllocateInfoKHR), allocate the Vulkan memory, optionally map it
persistently, create and initialize the VmaAllocation object, optionally fill it
with a debug pattern, and finally register it in m_pDedicatedAllocations.

map              - if true, memory is mapped persistently with vkMapMemory.
isUserDataString - if true, pUserData is treated as a string to be copied.
dedicatedBuffer/dedicatedImage - at most one may be non-null; passed to the
                   VK_KHR_dedicated_allocation pNext chain when that extension is used.

Returns VK_SUCCESS and fills *pAllocation, or the Vulkan error from allocation/mapping
(Vulkan memory is freed again if mapping fails).
*/
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // When VK_KHR_dedicated_allocation is enabled, tell the driver which buffer or
    // image this memory is dedicated to (enables driver-side optimizations).
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            // A dedicated allocation targets a buffer or an image, never both.
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Optionally map the whole block persistently.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            // Mapping failed: release the memory we just allocated before bailing out.
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        // Debug feature: fill fresh memory with a recognizable bit pattern.
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
    12441 
    12442 void VmaAllocator_T::GetBufferMemoryRequirements(
    12443  VkBuffer hBuffer,
    12444  VkMemoryRequirements& memReq,
    12445  bool& requiresDedicatedAllocation,
    12446  bool& prefersDedicatedAllocation) const
    12447 {
    12448 #if VMA_DEDICATED_ALLOCATION
    12449  if(m_UseKhrDedicatedAllocation)
    12450  {
    12451  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12452  memReqInfo.buffer = hBuffer;
    12453 
    12454  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12455 
    12456  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12457  memReq2.pNext = &memDedicatedReq;
    12458 
    12459  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12460 
    12461  memReq = memReq2.memoryRequirements;
    12462  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12463  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12464  }
    12465  else
    12466 #endif // #if VMA_DEDICATED_ALLOCATION
    12467  {
    12468  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    12469  requiresDedicatedAllocation = false;
    12470  prefersDedicatedAllocation = false;
    12471  }
    12472 }
    12473 
    12474 void VmaAllocator_T::GetImageMemoryRequirements(
    12475  VkImage hImage,
    12476  VkMemoryRequirements& memReq,
    12477  bool& requiresDedicatedAllocation,
    12478  bool& prefersDedicatedAllocation) const
    12479 {
    12480 #if VMA_DEDICATED_ALLOCATION
    12481  if(m_UseKhrDedicatedAllocation)
    12482  {
    12483  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    12484  memReqInfo.image = hImage;
    12485 
    12486  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    12487 
    12488  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    12489  memReq2.pNext = &memDedicatedReq;
    12490 
    12491  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    12492 
    12493  memReq = memReq2.memoryRequirements;
    12494  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    12495  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    12496  }
    12497  else
    12498 #endif // #if VMA_DEDICATED_ALLOCATION
    12499  {
    12500  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    12501  requiresDedicatedAllocation = false;
    12502  prefersDedicatedAllocation = false;
    12503  }
    12504 }
    12505 
    12506 VkResult VmaAllocator_T::AllocateMemory(
    12507  const VkMemoryRequirements& vkMemReq,
    12508  bool requiresDedicatedAllocation,
    12509  bool prefersDedicatedAllocation,
    12510  VkBuffer dedicatedBuffer,
    12511  VkImage dedicatedImage,
    12512  const VmaAllocationCreateInfo& createInfo,
    12513  VmaSuballocationType suballocType,
    12514  VmaAllocation* pAllocation)
    12515 {
    12516  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    12517 
    12518  if(vkMemReq.size == 0)
    12519  {
    12520  return VK_ERROR_VALIDATION_FAILED_EXT;
    12521  }
    12522  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    12523  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12524  {
    12525  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    12526  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12527  }
    12528  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    12530  {
    12531  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    12532  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12533  }
    12534  if(requiresDedicatedAllocation)
    12535  {
    12536  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    12537  {
    12538  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    12539  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12540  }
    12541  if(createInfo.pool != VK_NULL_HANDLE)
    12542  {
    12543  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    12544  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12545  }
    12546  }
    12547  if((createInfo.pool != VK_NULL_HANDLE) &&
    12548  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    12549  {
    12550  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    12551  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12552  }
    12553 
    12554  if(createInfo.pool != VK_NULL_HANDLE)
    12555  {
    12556  const VkDeviceSize alignmentForPool = VMA_MAX(
    12557  vkMemReq.alignment,
    12558  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    12559  return createInfo.pool->m_BlockVector.Allocate(
    12560  createInfo.pool,
    12561  m_CurrentFrameIndex.load(),
    12562  vkMemReq.size,
    12563  alignmentForPool,
    12564  createInfo,
    12565  suballocType,
    12566  pAllocation);
    12567  }
    12568  else
    12569  {
    12570  // Bit mask of memory Vulkan types acceptable for this allocation.
    12571  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    12572  uint32_t memTypeIndex = UINT32_MAX;
    12573  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12574  if(res == VK_SUCCESS)
    12575  {
    12576  VkDeviceSize alignmentForMemType = VMA_MAX(
    12577  vkMemReq.alignment,
    12578  GetMemoryTypeMinAlignment(memTypeIndex));
    12579 
    12580  res = AllocateMemoryOfType(
    12581  vkMemReq.size,
    12582  alignmentForMemType,
    12583  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12584  dedicatedBuffer,
    12585  dedicatedImage,
    12586  createInfo,
    12587  memTypeIndex,
    12588  suballocType,
    12589  pAllocation);
    12590  // Succeeded on first try.
    12591  if(res == VK_SUCCESS)
    12592  {
    12593  return res;
    12594  }
    12595  // Allocation from this memory type failed. Try other compatible memory types.
    12596  else
    12597  {
    12598  for(;;)
    12599  {
    12600  // Remove old memTypeIndex from list of possibilities.
    12601  memoryTypeBits &= ~(1u << memTypeIndex);
    12602  // Find alternative memTypeIndex.
    12603  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    12604  if(res == VK_SUCCESS)
    12605  {
    12606  alignmentForMemType = VMA_MAX(
    12607  vkMemReq.alignment,
    12608  GetMemoryTypeMinAlignment(memTypeIndex));
    12609 
    12610  res = AllocateMemoryOfType(
    12611  vkMemReq.size,
    12612  alignmentForMemType,
    12613  requiresDedicatedAllocation || prefersDedicatedAllocation,
    12614  dedicatedBuffer,
    12615  dedicatedImage,
    12616  createInfo,
    12617  memTypeIndex,
    12618  suballocType,
    12619  pAllocation);
    12620  // Allocation from this alternative memory type succeeded.
    12621  if(res == VK_SUCCESS)
    12622  {
    12623  return res;
    12624  }
    12625  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    12626  }
    12627  // No other matching memory type index could be found.
    12628  else
    12629  {
    12630  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    12631  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12632  }
    12633  }
    12634  }
    12635  }
    12636  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    12637  else
    12638  return res;
    12639  }
    12640 }
    12641 
    12642 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
    12643 {
    12644  VMA_ASSERT(allocation);
    12645 
    12646  if(TouchAllocation(allocation))
    12647  {
    12648  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12649  {
    12650  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    12651  }
    12652 
    12653  switch(allocation->GetType())
    12654  {
    12655  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12656  {
    12657  VmaBlockVector* pBlockVector = VMA_NULL;
    12658  VmaPool hPool = allocation->GetPool();
    12659  if(hPool != VK_NULL_HANDLE)
    12660  {
    12661  pBlockVector = &hPool->m_BlockVector;
    12662  }
    12663  else
    12664  {
    12665  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    12666  pBlockVector = m_pBlockVectors[memTypeIndex];
    12667  }
    12668  pBlockVector->Free(allocation);
    12669  }
    12670  break;
    12671  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12672  FreeDedicatedMemory(allocation);
    12673  break;
    12674  default:
    12675  VMA_ASSERT(0);
    12676  }
    12677  }
    12678 
    12679  allocation->SetUserData(this, VMA_NULL);
    12680  vma_delete(this, allocation);
    12681 }
    12682 
    12683 VkResult VmaAllocator_T::ResizeAllocation(
    12684  const VmaAllocation alloc,
    12685  VkDeviceSize newSize)
    12686 {
    12687  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    12688  {
    12689  return VK_ERROR_VALIDATION_FAILED_EXT;
    12690  }
    12691  if(newSize == alloc->GetSize())
    12692  {
    12693  return VK_SUCCESS;
    12694  }
    12695 
    12696  switch(alloc->GetType())
    12697  {
    12698  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    12699  return VK_ERROR_FEATURE_NOT_PRESENT;
    12700  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    12701  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    12702  {
    12703  alloc->ChangeSize(newSize);
    12704  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    12705  return VK_SUCCESS;
    12706  }
    12707  else
    12708  {
    12709  return VK_ERROR_OUT_OF_POOL_MEMORY;
    12710  }
    12711  default:
    12712  VMA_ASSERT(0);
    12713  return VK_ERROR_VALIDATION_FAILED_EXT;
    12714  }
    12715 }
    12716 
    12717 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    12718 {
    12719  // Initialize.
    12720  InitStatInfo(pStats->total);
    12721  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    12722  InitStatInfo(pStats->memoryType[i]);
    12723  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    12724  InitStatInfo(pStats->memoryHeap[i]);
    12725 
    12726  // Process default pools.
    12727  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12728  {
    12729  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    12730  VMA_ASSERT(pBlockVector);
    12731  pBlockVector->AddStats(pStats);
    12732  }
    12733 
    12734  // Process custom pools.
    12735  {
    12736  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    12737  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    12738  {
    12739  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    12740  }
    12741  }
    12742 
    12743  // Process dedicated allocations.
    12744  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    12745  {
    12746  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    12747  VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    12748  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    12749  VMA_ASSERT(pDedicatedAllocVector);
    12750  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    12751  {
    12752  VmaStatInfo allocationStatInfo;
    12753  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    12754  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12755  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12756  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12757  }
    12758  }
    12759 
    12760  // Postprocess.
    12761  VmaPostprocessCalcStatInfo(pStats->total);
    12762  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    12763  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    12764  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    12765  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    12766 }
    12767 
    12768 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    12769 
/*
Defragments the given allocations, moving them to reduce fragmentation.

Phases, in order (order matters - defragmentators are created lazily, used, then
destroyed in reverse):
1. Dispatch each eligible allocation to a per-block-vector defragmentator.
2. Run defragmentation on default block vectors, then on custom pools,
   stopping at the first failure and honoring the byte/move limits.
3. Destroy all defragmentators (custom pools first, then default vectors).

Eligibility: block-type allocations only (not dedicated), in HOST_VISIBLE |
HOST_COHERENT memory, not lost, and not in a pool using a linear/buddy algorithm.

pAllocationsChanged - optional; element i is set to VK_TRUE if allocation i moved.
pDefragmentationInfo - optional limits; when null, limits are effectively unbounded.
pDefragmentationStats - optional; zeroed here, filled by the block vectors.

Holds m_PoolsMutex for the whole operation.
*/
VkResult VmaAllocator_T::Defragment(
    VmaAllocation* pAllocations,
    size_t allocationCount,
    VkBool32* pAllocationsChanged,
    const VmaDefragmentationInfo* pDefragmentationInfo,
    VmaDefragmentationStats* pDefragmentationStats)
{
    if(pAllocationsChanged != VMA_NULL)
    {
        memset(pAllocationsChanged, 0, allocationCount * sizeof(VkBool32));
    }
    if(pDefragmentationStats != VMA_NULL)
    {
        memset(pDefragmentationStats, 0, sizeof(*pDefragmentationStats));
    }

    const uint32_t currentFrameIndex = m_CurrentFrameIndex.load();

    VmaMutexLock poolsLock(m_PoolsMutex, m_UseMutex);

    const size_t poolCount = m_Pools.size();

    // Dispatch pAllocations among defragmentators. Create them in BlockVectors when necessary.
    for(size_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
        // DedicatedAlloc cannot be defragmented.
        const VkMemoryPropertyFlags requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Only HOST_VISIBLE and HOST_COHERENT memory types can be defragmented.
            ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVector* pAllocBlockVector = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with linear or buddy algorithm are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    pAllocBlockVector = &hAllocPool->m_BlockVector;
                }
            }
            // This allocation belongs to general pool.
            else
            {
                pAllocBlockVector = m_pBlockVectors[memTypeIndex];
            }

            if(pAllocBlockVector != VMA_NULL)
            {
                VmaDefragmentator* const pDefragmentator =
                    pAllocBlockVector->EnsureDefragmentator(this, currentFrameIndex);
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pDefragmentator->AddAllocation(hAlloc, pChanged);
            }
        }
    }

    VkResult result = VK_SUCCESS;

    // ======== Main processing.

    // NOTE(review): SIZE_MAX here is only ~4 GB on 32-bit builds; presumably
    // intended as "unlimited" - consider UINT64_MAX/VK_WHOLE_SIZE. TODO confirm.
    VkDeviceSize maxBytesToMove = SIZE_MAX;
    uint32_t maxAllocationsToMove = UINT32_MAX;
    if(pDefragmentationInfo != VMA_NULL)
    {
        maxBytesToMove = pDefragmentationInfo->maxBytesToMove;
        maxAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = 0;
        (memTypeIndex < GetMemoryTypeCount()) && (result == VK_SUCCESS);
        ++memTypeIndex)
    {
        // Only HOST_VISIBLE memory types can be defragmented.
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            result = m_pBlockVectors[memTypeIndex]->Defragment(
                pDefragmentationStats,
                maxBytesToMove,
                maxAllocationsToMove);
        }
    }

    // Process custom pools.
    for(size_t poolIndex = 0; (poolIndex < poolCount) && (result == VK_SUCCESS); ++poolIndex)
    {
        result = m_Pools[poolIndex]->m_BlockVector.Defragment(
            pDefragmentationStats,
            maxBytesToMove,
            maxAllocationsToMove);
    }

    // ======== Destroy defragmentators.

    // Process custom pools.
    for(size_t poolIndex = poolCount; poolIndex--; )
    {
        m_Pools[poolIndex]->m_BlockVector.DestroyDefragmentator();
    }

    // Process standard memory.
    for(uint32_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
    {
        if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            m_pBlockVectors[memTypeIndex]->DestroyDefragmentator();
        }
    }

    return result;
}
    12890 
/*
Fills *pAllocationInfo with the current state of hAllocation.

For allocations that can become lost, this also "touches" the allocation: its
last-use frame index is advanced to the current frame via a lock-free
compare-exchange loop, so concurrent callers converge without a mutex. If the
allocation is already lost, memoryType/deviceMemory/offset are reported as
invalid/null/zero.
*/
void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
{
    if(hAllocation->CanBecomeLost())
    {
        /*
        Warning: This is a carefully designed algorithm.
        Do not modify unless you really know what you're doing :)
        */
        const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation was lost: report placeholder values.
                pAllocationInfo->memoryType = UINT32_MAX;
                pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
                pAllocationInfo->offset = 0;
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame: report the live state.
                pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
                pAllocationInfo->deviceMemory = hAllocation->GetMemory();
                pAllocationInfo->offset = hAllocation->GetOffset();
                pAllocationInfo->size = hAllocation->GetSize();
                pAllocationInfo->pMappedData = VMA_NULL;
                pAllocationInfo->pUserData = hAllocation->GetUserData();
                return;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance the last-use frame; on CAS failure
                // localLastUseFrameIndex is refreshed and we retry.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats builds still record the last-use frame so JSON dumps show it;
        // non-lost allocations must never have the LOST sentinel.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
        pAllocationInfo->offset = hAllocation->GetOffset();
        pAllocationInfo->size = hAllocation->GetSize();
        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
        pAllocationInfo->pUserData = hAllocation->GetUserData();
    }
}
    12962 
/*
"Touches" hAllocation: marks it as used in the current frame and reports whether
it is still valid. Returns false only if the allocation can become lost and has
already been lost; returns true otherwise. Uses the same lock-free
compare-exchange loop as GetAllocationInfo to advance the last-use frame index.
*/
bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
{
    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    if(hAllocation->CanBecomeLost())
    {
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
            {
                // Allocation already lost - memory is gone.
                return false;
            }
            else if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                // Already touched this frame.
                return true;
            }
            else // Last use time earlier than current time.
            {
                // Try to advance last-use frame; on CAS failure the value is
                // refreshed and the loop retries.
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
    }
    else
    {
#if VMA_STATS_STRING_ENABLED
        // Stats builds keep the last-use frame current; a non-lost allocation
        // must never carry the LOST sentinel.
        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
        for(;;)
        {
            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
            if(localLastUseFrameIndex == localCurrFrameIndex)
            {
                break;
            }
            else // Last use time earlier than current time.
            {
                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
                {
                    localLastUseFrameIndex = localCurrFrameIndex;
                }
            }
        }
#endif

        // Allocations that cannot become lost are always valid.
        return true;
    }
}
    13014 
    13015 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    13016 {
    13017  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    13018 
    13019  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    13020 
    13021  if(newCreateInfo.maxBlockCount == 0)
    13022  {
    13023  newCreateInfo.maxBlockCount = SIZE_MAX;
    13024  }
    13025  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    13026  {
    13027  return VK_ERROR_INITIALIZATION_FAILED;
    13028  }
    13029 
    13030  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    13031 
    13032  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    13033 
    13034  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    13035  if(res != VK_SUCCESS)
    13036  {
    13037  vma_delete(this, *pPool);
    13038  *pPool = VMA_NULL;
    13039  return res;
    13040  }
    13041 
    13042  // Add to m_Pools.
    13043  {
    13044  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13045  (*pPool)->SetId(m_NextPoolId++);
    13046  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    13047  }
    13048 
    13049  return VK_SUCCESS;
    13050 }
    13051 
    13052 void VmaAllocator_T::DestroyPool(VmaPool pool)
    13053 {
    13054  // Remove from m_Pools.
    13055  {
    13056  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13057  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    13058  VMA_ASSERT(success && "Pool not found in Allocator.");
    13059  }
    13060 
    13061  vma_delete(this, pool);
    13062 }
    13063 
// Forwards to the pool's block vector to fill *pPoolStats.
void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
{
    pool->m_BlockVector.GetPoolStats(pPoolStats);
}
    13068 
// Atomically publishes the current frame index, used by lost-allocation logic.
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
{
    m_CurrentFrameIndex.store(frameIndex);
}
    13073 
// Marks eligible allocations in the pool as lost, relative to the current frame
// index. The number of allocations made lost is written to *pLostAllocationCount.
void VmaAllocator_T::MakePoolAllocationsLost(
    VmaPool hPool,
    size_t* pLostAllocationCount)
{
    hPool->m_BlockVector.MakePoolAllocationsLost(
        m_CurrentFrameIndex.load(),
        pLostAllocationCount);
}
    13082 
// Forwards a corruption check (margin validation) to the pool's block vector.
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
    return hPool->m_BlockVector.CheckCorruption();
}
    13087 
    13088 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    13089 {
    13090  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    13091 
    13092  // Process default pools.
    13093  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    13094  {
    13095  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    13096  {
    13097  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    13098  VMA_ASSERT(pBlockVector);
    13099  VkResult localRes = pBlockVector->CheckCorruption();
    13100  switch(localRes)
    13101  {
    13102  case VK_ERROR_FEATURE_NOT_PRESENT:
    13103  break;
    13104  case VK_SUCCESS:
    13105  finalRes = VK_SUCCESS;
    13106  break;
    13107  default:
    13108  return localRes;
    13109  }
    13110  }
    13111  }
    13112 
    13113  // Process custom pools.
    13114  {
    13115  VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
    13116  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    13117  {
    13118  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    13119  {
    13120  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    13121  switch(localRes)
    13122  {
    13123  case VK_ERROR_FEATURE_NOT_PRESENT:
    13124  break;
    13125  case VK_SUCCESS:
    13126  finalRes = VK_SUCCESS;
    13127  break;
    13128  default:
    13129  return localRes;
    13130  }
    13131  }
    13132  }
    13133  }
    13134 
    13135  return finalRes;
    13136 }
    13137 
// Creates a dummy allocation object that is permanently in the "lost" state.
// Constructed with the special VMA_FRAME_INDEX_LOST marker; the second ctor
// argument is `false` (presumably userDataString - confirm in ctor).
void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
{
    *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
    (*pAllocation)->InitLost();
}
    13143 
    13144 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    13145 {
    13146  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    13147 
    13148  VkResult res;
    13149  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13150  {
    13151  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13152  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    13153  {
    13154  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13155  if(res == VK_SUCCESS)
    13156  {
    13157  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    13158  }
    13159  }
    13160  else
    13161  {
    13162  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    13163  }
    13164  }
    13165  else
    13166  {
    13167  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    13168  }
    13169 
    13170  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    13171  {
    13172  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    13173  }
    13174 
    13175  return res;
    13176 }
    13177 
    13178 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    13179 {
    13180  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    13181  {
    13182  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    13183  }
    13184 
    13185  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    13186 
    13187  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    13188  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    13189  {
    13190  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    13191  m_HeapSizeLimit[heapIndex] += size;
    13192  }
    13193 }
    13194 
    13195 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    13196 {
    13197  if(hAllocation->CanBecomeLost())
    13198  {
    13199  return VK_ERROR_MEMORY_MAP_FAILED;
    13200  }
    13201 
    13202  switch(hAllocation->GetType())
    13203  {
    13204  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13205  {
    13206  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13207  char *pBytes = VMA_NULL;
    13208  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    13209  if(res == VK_SUCCESS)
    13210  {
    13211  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    13212  hAllocation->BlockAllocMap();
    13213  }
    13214  return res;
    13215  }
    13216  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13217  return hAllocation->DedicatedAllocMap(this, ppData);
    13218  default:
    13219  VMA_ASSERT(0);
    13220  return VK_ERROR_MEMORY_MAP_FAILED;
    13221  }
    13222 }
    13223 
    13224 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    13225 {
    13226  switch(hAllocation->GetType())
    13227  {
    13228  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13229  {
    13230  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    13231  hAllocation->BlockAllocUnmap();
    13232  pBlock->Unmap(this, 1);
    13233  }
    13234  break;
    13235  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13236  hAllocation->DedicatedAllocUnmap(this);
    13237  break;
    13238  default:
    13239  VMA_ASSERT(0);
    13240  }
    13241 }
    13242 
    13243 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    13244 {
    13245  VkResult res = VK_SUCCESS;
    13246  switch(hAllocation->GetType())
    13247  {
    13248  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13249  res = GetVulkanFunctions().vkBindBufferMemory(
    13250  m_hDevice,
    13251  hBuffer,
    13252  hAllocation->GetMemory(),
    13253  0); //memoryOffset
    13254  break;
    13255  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13256  {
    13257  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13258  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    13259  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    13260  break;
    13261  }
    13262  default:
    13263  VMA_ASSERT(0);
    13264  }
    13265  return res;
    13266 }
    13267 
    13268 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    13269 {
    13270  VkResult res = VK_SUCCESS;
    13271  switch(hAllocation->GetType())
    13272  {
    13273  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    13274  res = GetVulkanFunctions().vkBindImageMemory(
    13275  m_hDevice,
    13276  hImage,
    13277  hAllocation->GetMemory(),
    13278  0); //memoryOffset
    13279  break;
    13280  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    13281  {
    13282  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    13283  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    13284  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    13285  break;
    13286  }
    13287  default:
    13288  VMA_ASSERT(0);
    13289  }
    13290  return res;
    13291 }
    13292 
// Flushes (host->device) or invalidates (device->host) host caches for a
// sub-range of the allocation.
// offset/size are relative to the start of the allocation;
// size == VK_WHOLE_SIZE means "to the end of the allocation".
// Does nothing for coherent memory types or size == 0.
// The range is expanded to nonCoherentAtomSize boundaries as the Vulkan spec
// requires for VkMappedMemoryRange.
void VmaAllocator_T::FlushOrInvalidateAllocation(
    VmaAllocation hAllocation,
    VkDeviceSize offset, VkDeviceSize size,
    VMA_CACHE_OPERATION op)
{
    const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    {
        const VkDeviceSize allocationSize = hAllocation->GetSize();
        VMA_ASSERT(offset <= allocationSize);

        const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
        memRange.memory = hAllocation->GetMemory();

        switch(hAllocation->GetType())
        {
        case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
            // Dedicated allocation: offsets are relative to the whole
            // VkDeviceMemory, so only atom alignment is needed.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                memRange.size = allocationSize - memRange.offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
                // Align the end up, but never past the end of the allocation.
                memRange.size = VMA_MIN(
                    VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
                    allocationSize - memRange.offset);
            }
            break;

        case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
        {
            // 1. Still within this allocation.
            // Align the requested sub-range in allocation-local coordinates.
            memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
            if(size == VK_WHOLE_SIZE)
            {
                size = allocationSize - offset;
            }
            else
            {
                VMA_ASSERT(offset + size <= allocationSize);
            }
            memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);

            // 2. Adjust to whole block.
            // Translate into block-relative coordinates and clamp to the
            // block's size so the range never exceeds the VkDeviceMemory.
            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
            memRange.offset += allocationOffset;
            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);

            break;
        }

        default:
            VMA_ASSERT(0);
        }

        switch(op)
        {
        case VMA_CACHE_FLUSH:
            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        case VMA_CACHE_INVALIDATE:
            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
            break;
        default:
            VMA_ASSERT(0);
        }
    }
    // else: Just ignore this call.
}
    13368 
    13369 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    13370 {
    13371  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    13372 
    13373  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    13374  {
    13375  VmaMutexLock lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    13376  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    13377  VMA_ASSERT(pDedicatedAllocations);
    13378  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    13379  VMA_ASSERT(success);
    13380  }
    13381 
    13382  VkDeviceMemory hMemory = allocation->GetMemory();
    13383 
    13384  /*
    13385  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
    13386  before vkFreeMemory.
    13387 
    13388  if(allocation->GetMappedData() != VMA_NULL)
    13389  {
    13390  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    13391  }
    13392  */
    13393 
    13394  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    13395 
    13396  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    13397 }
    13398 
    13399 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    13400 {
    13401  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    13402  !hAllocation->CanBecomeLost() &&
    13403  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    13404  {
    13405  void* pData = VMA_NULL;
    13406  VkResult res = Map(hAllocation, &pData);
    13407  if(res == VK_SUCCESS)
    13408  {
    13409  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    13410  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    13411  Unmap(hAllocation);
    13412  }
    13413  else
    13414  {
    13415  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    13416  }
    13417  }
    13418 }
    13419 
    13420 #if VMA_STATS_STRING_ENABLED
    13421 
// Emits up to three JSON sections into the writer's currently open object:
// "DedicatedAllocations", "DefaultPools" and "Pools". Each section is opened
// lazily and only written when it has content, so the output shape depends
// on allocator state. Called with detailed-map output enabled (see
// vmaBuildStatsString).
void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
{
    bool dedicatedAllocationsStarted = false;
    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    {
        // Lock each memory type's dedicated-allocation list while printing it.
        VmaMutexLock dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocVector);
        if(pDedicatedAllocVector->empty() == false)
        {
            // Open the "DedicatedAllocations" section on the first non-empty type.
            if(dedicatedAllocationsStarted == false)
            {
                dedicatedAllocationsStarted = true;
                json.WriteString("DedicatedAllocations");
                json.BeginObject();
            }

            json.BeginString("Type ");
            json.ContinueString(memTypeIndex);
            json.EndString();

            json.BeginArray();

            for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
            {
                json.BeginObject(true);
                const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
                hAlloc->PrintParameters(json);
                json.EndObject();
            }

            json.EndArray();
        }
    }
    if(dedicatedAllocationsStarted)
    {
        json.EndObject();
    }

    // Default (per-memory-type) block vectors.
    {
        bool allocationsStarted = false;
        for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
        {
            if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
            {
                // Open the "DefaultPools" section on the first non-empty vector.
                if(allocationsStarted == false)
                {
                    allocationsStarted = true;
                    json.WriteString("DefaultPools");
                    json.BeginObject();
                }

                json.BeginString("Type ");
                json.ContinueString(memTypeIndex);
                json.EndString();

                m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
            }
        }
        if(allocationsStarted)
        {
            json.EndObject();
        }
    }

    // Custom pools
    {
        VmaMutexLock lock(m_PoolsMutex, m_UseMutex);
        const size_t poolCount = m_Pools.size();
        if(poolCount > 0)
        {
            json.WriteString("Pools");
            json.BeginObject();
            for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
            {
                // Keys of this object are the pools' numeric IDs.
                json.BeginString();
                json.ContinueString(m_Pools[poolIndex]->GetId());
                json.EndString();

                m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
            }
            json.EndObject();
        }
    }
}
    13507 
    13508 #endif // #if VMA_STATS_STRING_ENABLED
    13509 
    13511 // Public interface
    13512 
// Creates the allocator object, then runs its second-phase Init().
// Returns Init()'s result.
// NOTE(review): if Init() fails, *pAllocator is left pointing at the
// partially-initialized object and is not deleted here - confirm whether
// callers are expected to call vmaDestroyAllocator on failure.
VkResult vmaCreateAllocator(
    const VmaAllocatorCreateInfo* pCreateInfo,
    VmaAllocator* pAllocator)
{
    VMA_ASSERT(pCreateInfo && pAllocator);
    VMA_DEBUG_LOG("vmaCreateAllocator");
    *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    return (*pAllocator)->Init(pCreateInfo);
}
    13522 
    13523 void vmaDestroyAllocator(
    13524  VmaAllocator allocator)
    13525 {
    13526  if(allocator != VK_NULL_HANDLE)
    13527  {
    13528  VMA_DEBUG_LOG("vmaDestroyAllocator");
    13529  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    13530  vma_delete(&allocationCallbacks, allocator);
    13531  }
    13532 }
    13533 
    13535  VmaAllocator allocator,
    13536  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    13537 {
    13538  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    13539  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    13540 }
    13541 
    13543  VmaAllocator allocator,
    13544  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    13545 {
    13546  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    13547  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    13548 }
    13549 
    13551  VmaAllocator allocator,
    13552  uint32_t memoryTypeIndex,
    13553  VkMemoryPropertyFlags* pFlags)
    13554 {
    13555  VMA_ASSERT(allocator && pFlags);
    13556  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    13557  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    13558 }
    13559 
    13561  VmaAllocator allocator,
    13562  uint32_t frameIndex)
    13563 {
    13564  VMA_ASSERT(allocator);
    13565  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    13566 
    13567  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13568 
    13569  allocator->SetCurrentFrameIndex(frameIndex);
    13570 }
    13571 
// Public wrapper: computes allocator-wide statistics into *pStats.
// Delegates to VmaAllocator_T::CalculateStats under the optional global
// debug mutex.
void vmaCalculateStats(
    VmaAllocator allocator,
    VmaStats* pStats)
{
    VMA_ASSERT(allocator && pStats);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK
    allocator->CalculateStats(pStats);
}
    13580 
    13581 #if VMA_STATS_STRING_ENABLED
    13582 
// Builds a NUL-terminated JSON string describing current allocator state:
// total statistics, then a per-heap breakdown (size, flags, stats and the
// memory types belonging to that heap), and - when detailedMap is VK_TRUE -
// the full detailed map of blocks and allocations.
// The returned string must be released with vmaFreeStatsString.
void vmaBuildStatsString(
    VmaAllocator allocator,
    char** ppStatsString,
    VkBool32 detailedMap)
{
    VMA_ASSERT(allocator && ppStatsString);
    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VmaStringBuilder sb(allocator);
    {
        // Scope ends before the string is copied out, so the JSON writer is
        // finished (and its closing braces emitted) first.
        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
        json.BeginObject();

        VmaStats stats;
        allocator->CalculateStats(&stats);

        json.WriteString("Total");
        VmaPrintStatInfo(json, stats.total);

        for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
        {
            json.BeginString("Heap ");
            json.ContinueString(heapIndex);
            json.EndString();
            json.BeginObject();

            json.WriteString("Size");
            json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);

            json.WriteString("Flags");
            json.BeginArray(true);
            if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            {
                json.WriteString("DEVICE_LOCAL");
            }
            json.EndArray();

            // Per-heap stats only when the heap actually has blocks.
            if(stats.memoryHeap[heapIndex].blockCount > 0)
            {
                json.WriteString("Stats");
                VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
            }

            // Nest every memory type that lives in this heap.
            for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
            {
                if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
                {
                    json.BeginString("Type ");
                    json.ContinueString(typeIndex);
                    json.EndString();

                    json.BeginObject();

                    // Spell out the memory property flags as string entries.
                    json.WriteString("Flags");
                    json.BeginArray(true);
                    VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
                    if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
                    {
                        json.WriteString("DEVICE_LOCAL");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
                    {
                        json.WriteString("HOST_VISIBLE");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
                    {
                        json.WriteString("HOST_COHERENT");
                    }
                    if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
                    {
                        json.WriteString("HOST_CACHED");
                    }
                    if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
                    {
                        json.WriteString("LAZILY_ALLOCATED");
                    }
                    json.EndArray();

                    if(stats.memoryType[typeIndex].blockCount > 0)
                    {
                        json.WriteString("Stats");
                        VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
                    }

                    json.EndObject();
                }
            }

            json.EndObject();
        }
        if(detailedMap == VK_TRUE)
        {
            allocator->PrintDetailedMap(json);
        }

        json.EndObject();
    }

    // Copy the built string into a caller-owned, NUL-terminated buffer.
    const size_t len = sb.GetLength();
    char* const pChars = vma_new_array(allocator, char, len + 1);
    if(len > 0)
    {
        memcpy(pChars, sb.GetData(), len);
    }
    pChars[len] = '\0';
    *ppStatsString = pChars;
}
    13690 
    13691 void vmaFreeStatsString(
    13692  VmaAllocator allocator,
    13693  char* pStatsString)
    13694 {
    13695  if(pStatsString != VMA_NULL)
    13696  {
    13697  VMA_ASSERT(allocator);
    13698  size_t len = strlen(pStatsString);
    13699  vma_delete_array(allocator, pStatsString, len + 1);
    13700  }
    13701 }
    13702 
    13703 #endif // #if VMA_STATS_STRING_ENABLED
    13704 
    13705 /*
    13706 This function is not protected by any mutex because it just reads immutable data.
    13707 */
/*
This function is not protected by any mutex because it just reads immutable data.
*/
// Finds the lowest-cost memory type index that satisfies memoryTypeBits plus
// the required/preferred flags derived from pAllocationCreateInfo.
// Cost = number of preferredFlags bits missing from a candidate type; a
// zero-cost match returns immediately.
// Returns VK_ERROR_FEATURE_NOT_PRESENT when no acceptable type exists.
VkResult vmaFindMemoryTypeIndex(
    VmaAllocator allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* pAllocationCreateInfo,
    uint32_t* pMemoryTypeIndex)
{
    VMA_ASSERT(allocator != VK_NULL_HANDLE);
    VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);

    // Caller-supplied mask further restricts the acceptable memory types.
    if(pAllocationCreateInfo->memoryTypeBits != 0)
    {
        memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    }

    uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;

    // Persistently mapped allocations need host-visible memory.
    const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    if(mapped)
    {
        preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }

    // Convert usage to requiredFlags and preferredFlags.
    // NOTE(review): the `case VMA_MEMORY_USAGE_*:` labels of this switch
    // appear to have been lost (extraction damage) - as written, the
    // statements before `default:` are unreachable. Restore the labels from
    // the upstream source before relying on this function.
    switch(pAllocationCreateInfo->usage)
    {
        break;
        if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
        {
            preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
        }
        break;
        requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        break;
        requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
        if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
        {
            preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
        }
        break;
        requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
        preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        break;
    default:
        break;
    }

    *pMemoryTypeIndex = UINT32_MAX;
    uint32_t minCost = UINT32_MAX;
    for(uint32_t memTypeIndex = 0, memTypeBit = 1;
        memTypeIndex < allocator->GetMemoryTypeCount();
        ++memTypeIndex, memTypeBit <<= 1)
    {
        // This memory type is acceptable according to memoryTypeBits bitmask.
        if((memTypeBit & memoryTypeBits) != 0)
        {
            const VkMemoryPropertyFlags currFlags =
                allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
            // This memory type contains requiredFlags.
            if((requiredFlags & ~currFlags) == 0)
            {
                // Calculate cost as number of bits from preferredFlags not present in this memory type.
                uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
                // Remember memory type with lowest cost.
                if(currCost < minCost)
                {
                    *pMemoryTypeIndex = memTypeIndex;
                    if(currCost == 0)
                    {
                        // Perfect match - cannot do better.
                        return VK_SUCCESS;
                    }
                    minCost = currCost;
                }
            }
        }
    }
    return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
}
    13792 
    13794  VmaAllocator allocator,
    13795  const VkBufferCreateInfo* pBufferCreateInfo,
    13796  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13797  uint32_t* pMemoryTypeIndex)
    13798 {
    13799  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13800  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    13801  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13802  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13803 
    13804  const VkDevice hDev = allocator->m_hDevice;
    13805  VkBuffer hBuffer = VK_NULL_HANDLE;
    13806  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    13807  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    13808  if(res == VK_SUCCESS)
    13809  {
    13810  VkMemoryRequirements memReq = {};
    13811  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    13812  hDev, hBuffer, &memReq);
    13813 
    13814  res = vmaFindMemoryTypeIndex(
    13815  allocator,
    13816  memReq.memoryTypeBits,
    13817  pAllocationCreateInfo,
    13818  pMemoryTypeIndex);
    13819 
    13820  allocator->GetVulkanFunctions().vkDestroyBuffer(
    13821  hDev, hBuffer, allocator->GetAllocationCallbacks());
    13822  }
    13823  return res;
    13824 }
    13825 
    13827  VmaAllocator allocator,
    13828  const VkImageCreateInfo* pImageCreateInfo,
    13829  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    13830  uint32_t* pMemoryTypeIndex)
    13831 {
    13832  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    13833  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    13834  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    13835  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    13836 
    13837  const VkDevice hDev = allocator->m_hDevice;
    13838  VkImage hImage = VK_NULL_HANDLE;
    13839  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    13840  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    13841  if(res == VK_SUCCESS)
    13842  {
    13843  VkMemoryRequirements memReq = {};
    13844  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    13845  hDev, hImage, &memReq);
    13846 
    13847  res = vmaFindMemoryTypeIndex(
    13848  allocator,
    13849  memReq.memoryTypeBits,
    13850  pAllocationCreateInfo,
    13851  pMemoryTypeIndex);
    13852 
    13853  allocator->GetVulkanFunctions().vkDestroyImage(
    13854  hDev, hImage, allocator->GetAllocationCallbacks());
    13855  }
    13856  return res;
    13857 }
    13858 
// Public wrapper: creates a custom memory pool.
// Delegates to VmaAllocator_T::CreatePool, then records the call (with the
// resulting pool handle) when recording is enabled.
VkResult vmaCreatePool(
    VmaAllocator allocator,
    const VmaPoolCreateInfo* pCreateInfo,
    VmaPool* pPool)
{
    VMA_ASSERT(allocator && pCreateInfo && pPool);

    VMA_DEBUG_LOG("vmaCreatePool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult res = allocator->CreatePool(pCreateInfo, pPool);

#if VMA_RECORDING_ENABLED
    // Recording happens after creation so *pPool is valid.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    }
#endif

    return res;
}
    13881 
// Public wrapper: destroys a custom memory pool.
// Destroying VK_NULL_HANDLE is a no-op. When recording is enabled, the call
// is recorded BEFORE destruction, while the handle is still valid.
void vmaDestroyPool(
    VmaAllocator allocator,
    VmaPool pool)
{
    VMA_ASSERT(allocator);

    if(pool == VK_NULL_HANDLE)
    {
        return;
    }

    VMA_DEBUG_LOG("vmaDestroyPool");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

#if VMA_RECORDING_ENABLED
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    }
#endif

    allocator->DestroyPool(pool);
}
    13906 
// Public wrapper: retrieves statistics of a custom pool into *pPoolStats.
void vmaGetPoolStats(
    VmaAllocator allocator,
    VmaPool pool,
    VmaPoolStats* pPoolStats)
{
    VMA_ASSERT(allocator && pool && pPoolStats);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    allocator->GetPoolStats(pool, pPoolStats);
}
    13918 
    13920  VmaAllocator allocator,
    13921  VmaPool pool,
    13922  size_t* pLostAllocationCount)
    13923 {
    13924  VMA_ASSERT(allocator && pool);
    13925 
    13926  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    13927 
    13928 #if VMA_RECORDING_ENABLED
    13929  if(allocator->GetRecorder() != VMA_NULL)
    13930  {
    13931  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    13932  }
    13933 #endif
    13934 
    13935  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    13936 }
    13937 
// Public wrapper: runs corruption detection over a single custom pool.
// Returns the result of VmaAllocator_T::CheckPoolCorruption unchanged.
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
{
    VMA_ASSERT(allocator && pool);

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VMA_DEBUG_LOG("vmaCheckPoolCorruption");

    return allocator->CheckPoolCorruption(pool);
}
    13948 
// Public wrapper: general-purpose allocation from caller-supplied
// VkMemoryRequirements. No buffer/image is associated, so the
// dedicated-allocation hints are off and the suballocation type is UNKNOWN.
// pAllocationInfo is optional and is filled only on success.
VkResult vmaAllocateMemory(
    VmaAllocator allocator,
    const VkMemoryRequirements* pVkMemoryRequirements,
    const VmaAllocationCreateInfo* pCreateInfo,
    VmaAllocation* pAllocation,
    VmaAllocationInfo* pAllocationInfo)
{
    VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);

    VMA_DEBUG_LOG("vmaAllocateMemory");

    VMA_DEBUG_GLOBAL_MUTEX_LOCK

    VkResult result = allocator->AllocateMemory(
        *pVkMemoryRequirements,
        false, // requiresDedicatedAllocation
        false, // prefersDedicatedAllocation
        VK_NULL_HANDLE, // dedicatedBuffer
        VK_NULL_HANDLE, // dedicatedImage
        *pCreateInfo,
        VMA_SUBALLOCATION_TYPE_UNKNOWN,
        pAllocation);

#if VMA_RECORDING_ENABLED
    // NOTE: recording happens regardless of result, matching the other entry points.
    if(allocator->GetRecorder() != VMA_NULL)
    {
        allocator->GetRecorder()->RecordAllocateMemory(
            allocator->GetCurrentFrameIndex(),
            *pVkMemoryRequirements,
            *pCreateInfo,
            *pAllocation);
    }
#endif

    if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    {
        allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    }

    return result;
}
    13990 
    13992  VmaAllocator allocator,
    13993  VkBuffer buffer,
    13994  const VmaAllocationCreateInfo* pCreateInfo,
    13995  VmaAllocation* pAllocation,
    13996  VmaAllocationInfo* pAllocationInfo)
    13997 {
    13998  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    13999 
    14000  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    14001 
    14002  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14003 
    14004  VkMemoryRequirements vkMemReq = {};
    14005  bool requiresDedicatedAllocation = false;
    14006  bool prefersDedicatedAllocation = false;
    14007  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    14008  requiresDedicatedAllocation,
    14009  prefersDedicatedAllocation);
    14010 
    14011  VkResult result = allocator->AllocateMemory(
    14012  vkMemReq,
    14013  requiresDedicatedAllocation,
    14014  prefersDedicatedAllocation,
    14015  buffer, // dedicatedBuffer
    14016  VK_NULL_HANDLE, // dedicatedImage
    14017  *pCreateInfo,
    14018  VMA_SUBALLOCATION_TYPE_BUFFER,
    14019  pAllocation);
    14020 
    14021 #if VMA_RECORDING_ENABLED
    14022  if(allocator->GetRecorder() != VMA_NULL)
    14023  {
    14024  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    14025  allocator->GetCurrentFrameIndex(),
    14026  vkMemReq,
    14027  requiresDedicatedAllocation,
    14028  prefersDedicatedAllocation,
    14029  *pCreateInfo,
    14030  *pAllocation);
    14031  }
    14032 #endif
    14033 
    14034  if(pAllocationInfo && result == VK_SUCCESS)
    14035  {
    14036  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14037  }
    14038 
    14039  return result;
    14040 }
    14041 
    14042 VkResult vmaAllocateMemoryForImage(
    14043  VmaAllocator allocator,
    14044  VkImage image,
    14045  const VmaAllocationCreateInfo* pCreateInfo,
    14046  VmaAllocation* pAllocation,
    14047  VmaAllocationInfo* pAllocationInfo)
    14048 {
    14049  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    14050 
    14051  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    14052 
    14053  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14054 
    14055  VkMemoryRequirements vkMemReq = {};
    14056  bool requiresDedicatedAllocation = false;
    14057  bool prefersDedicatedAllocation = false;
    14058  allocator->GetImageMemoryRequirements(image, vkMemReq,
    14059  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14060 
    14061  VkResult result = allocator->AllocateMemory(
    14062  vkMemReq,
    14063  requiresDedicatedAllocation,
    14064  prefersDedicatedAllocation,
    14065  VK_NULL_HANDLE, // dedicatedBuffer
    14066  image, // dedicatedImage
    14067  *pCreateInfo,
    14068  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    14069  pAllocation);
    14070 
    14071 #if VMA_RECORDING_ENABLED
    14072  if(allocator->GetRecorder() != VMA_NULL)
    14073  {
    14074  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    14075  allocator->GetCurrentFrameIndex(),
    14076  vkMemReq,
    14077  requiresDedicatedAllocation,
    14078  prefersDedicatedAllocation,
    14079  *pCreateInfo,
    14080  *pAllocation);
    14081  }
    14082 #endif
    14083 
    14084  if(pAllocationInfo && result == VK_SUCCESS)
    14085  {
    14086  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14087  }
    14088 
    14089  return result;
    14090 }
    14091 
    14092 void vmaFreeMemory(
    14093  VmaAllocator allocator,
    14094  VmaAllocation allocation)
    14095 {
    14096  VMA_ASSERT(allocator);
    14097 
    14098  if(allocation == VK_NULL_HANDLE)
    14099  {
    14100  return;
    14101  }
    14102 
    14103  VMA_DEBUG_LOG("vmaFreeMemory");
    14104 
    14105  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14106 
    14107 #if VMA_RECORDING_ENABLED
    14108  if(allocator->GetRecorder() != VMA_NULL)
    14109  {
    14110  allocator->GetRecorder()->RecordFreeMemory(
    14111  allocator->GetCurrentFrameIndex(),
    14112  allocation);
    14113  }
    14114 #endif
    14115 
    14116  allocator->FreeMemory(allocation);
    14117 }
    14118 
    14119 VkResult vmaResizeAllocation(
    14120  VmaAllocator allocator,
    14121  VmaAllocation allocation,
    14122  VkDeviceSize newSize)
    14123 {
    14124  VMA_ASSERT(allocator && allocation);
    14125 
    14126  VMA_DEBUG_LOG("vmaResizeAllocation");
    14127 
    14128  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14129 
    14130 #if VMA_RECORDING_ENABLED
    14131  if(allocator->GetRecorder() != VMA_NULL)
    14132  {
    14133  allocator->GetRecorder()->RecordResizeAllocation(
    14134  allocator->GetCurrentFrameIndex(),
    14135  allocation,
    14136  newSize);
    14137  }
    14138 #endif
    14139 
    14140  return allocator->ResizeAllocation(allocation, newSize);
    14141 }
    14142 
    14144  VmaAllocator allocator,
    14145  VmaAllocation allocation,
    14146  VmaAllocationInfo* pAllocationInfo)
    14147 {
    14148  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    14149 
    14150  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14151 
    14152 #if VMA_RECORDING_ENABLED
    14153  if(allocator->GetRecorder() != VMA_NULL)
    14154  {
    14155  allocator->GetRecorder()->RecordGetAllocationInfo(
    14156  allocator->GetCurrentFrameIndex(),
    14157  allocation);
    14158  }
    14159 #endif
    14160 
    14161  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    14162 }
    14163 
    14164 VkBool32 vmaTouchAllocation(
    14165  VmaAllocator allocator,
    14166  VmaAllocation allocation)
    14167 {
    14168  VMA_ASSERT(allocator && allocation);
    14169 
    14170  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14171 
    14172 #if VMA_RECORDING_ENABLED
    14173  if(allocator->GetRecorder() != VMA_NULL)
    14174  {
    14175  allocator->GetRecorder()->RecordTouchAllocation(
    14176  allocator->GetCurrentFrameIndex(),
    14177  allocation);
    14178  }
    14179 #endif
    14180 
    14181  return allocator->TouchAllocation(allocation);
    14182 }
    14183 
    14185  VmaAllocator allocator,
    14186  VmaAllocation allocation,
    14187  void* pUserData)
    14188 {
    14189  VMA_ASSERT(allocator && allocation);
    14190 
    14191  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14192 
    14193  allocation->SetUserData(allocator, pUserData);
    14194 
    14195 #if VMA_RECORDING_ENABLED
    14196  if(allocator->GetRecorder() != VMA_NULL)
    14197  {
    14198  allocator->GetRecorder()->RecordSetAllocationUserData(
    14199  allocator->GetCurrentFrameIndex(),
    14200  allocation,
    14201  pUserData);
    14202  }
    14203 #endif
    14204 }
    14205 
    14207  VmaAllocator allocator,
    14208  VmaAllocation* pAllocation)
    14209 {
    14210  VMA_ASSERT(allocator && pAllocation);
    14211 
    14212  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    14213 
    14214  allocator->CreateLostAllocation(pAllocation);
    14215 
    14216 #if VMA_RECORDING_ENABLED
    14217  if(allocator->GetRecorder() != VMA_NULL)
    14218  {
    14219  allocator->GetRecorder()->RecordCreateLostAllocation(
    14220  allocator->GetCurrentFrameIndex(),
    14221  *pAllocation);
    14222  }
    14223 #endif
    14224 }
    14225 
    14226 VkResult vmaMapMemory(
    14227  VmaAllocator allocator,
    14228  VmaAllocation allocation,
    14229  void** ppData)
    14230 {
    14231  VMA_ASSERT(allocator && allocation && ppData);
    14232 
    14233  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14234 
    14235  VkResult res = allocator->Map(allocation, ppData);
    14236 
    14237 #if VMA_RECORDING_ENABLED
    14238  if(allocator->GetRecorder() != VMA_NULL)
    14239  {
    14240  allocator->GetRecorder()->RecordMapMemory(
    14241  allocator->GetCurrentFrameIndex(),
    14242  allocation);
    14243  }
    14244 #endif
    14245 
    14246  return res;
    14247 }
    14248 
    14249 void vmaUnmapMemory(
    14250  VmaAllocator allocator,
    14251  VmaAllocation allocation)
    14252 {
    14253  VMA_ASSERT(allocator && allocation);
    14254 
    14255  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14256 
    14257 #if VMA_RECORDING_ENABLED
    14258  if(allocator->GetRecorder() != VMA_NULL)
    14259  {
    14260  allocator->GetRecorder()->RecordUnmapMemory(
    14261  allocator->GetCurrentFrameIndex(),
    14262  allocation);
    14263  }
    14264 #endif
    14265 
    14266  allocator->Unmap(allocation);
    14267 }
    14268 
    14269 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14270 {
    14271  VMA_ASSERT(allocator && allocation);
    14272 
    14273  VMA_DEBUG_LOG("vmaFlushAllocation");
    14274 
    14275  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14276 
    14277  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    14278 
    14279 #if VMA_RECORDING_ENABLED
    14280  if(allocator->GetRecorder() != VMA_NULL)
    14281  {
    14282  allocator->GetRecorder()->RecordFlushAllocation(
    14283  allocator->GetCurrentFrameIndex(),
    14284  allocation, offset, size);
    14285  }
    14286 #endif
    14287 }
    14288 
    14289 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    14290 {
    14291  VMA_ASSERT(allocator && allocation);
    14292 
    14293  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    14294 
    14295  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14296 
    14297  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    14298 
    14299 #if VMA_RECORDING_ENABLED
    14300  if(allocator->GetRecorder() != VMA_NULL)
    14301  {
    14302  allocator->GetRecorder()->RecordInvalidateAllocation(
    14303  allocator->GetCurrentFrameIndex(),
    14304  allocation, offset, size);
    14305  }
    14306 #endif
    14307 }
    14308 
    14309 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    14310 {
    14311  VMA_ASSERT(allocator);
    14312 
    14313  VMA_DEBUG_LOG("vmaCheckCorruption");
    14314 
    14315  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14316 
    14317  return allocator->CheckCorruption(memoryTypeBits);
    14318 }
    14319 
    14320 VkResult vmaDefragment(
    14321  VmaAllocator allocator,
    14322  VmaAllocation* pAllocations,
    14323  size_t allocationCount,
    14324  VkBool32* pAllocationsChanged,
    14325  const VmaDefragmentationInfo *pDefragmentationInfo,
    14326  VmaDefragmentationStats* pDefragmentationStats)
    14327 {
    14328  VMA_ASSERT(allocator && pAllocations);
    14329 
    14330  VMA_DEBUG_LOG("vmaDefragment");
    14331 
    14332  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14333 
    14334  return allocator->Defragment(pAllocations, allocationCount, pAllocationsChanged, pDefragmentationInfo, pDefragmentationStats);
    14335 }
    14336 
    14337 VkResult vmaBindBufferMemory(
    14338  VmaAllocator allocator,
    14339  VmaAllocation allocation,
    14340  VkBuffer buffer)
    14341 {
    14342  VMA_ASSERT(allocator && allocation && buffer);
    14343 
    14344  VMA_DEBUG_LOG("vmaBindBufferMemory");
    14345 
    14346  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14347 
    14348  return allocator->BindBufferMemory(allocation, buffer);
    14349 }
    14350 
    14351 VkResult vmaBindImageMemory(
    14352  VmaAllocator allocator,
    14353  VmaAllocation allocation,
    14354  VkImage image)
    14355 {
    14356  VMA_ASSERT(allocator && allocation && image);
    14357 
    14358  VMA_DEBUG_LOG("vmaBindImageMemory");
    14359 
    14360  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14361 
    14362  return allocator->BindImageMemory(allocation, image);
    14363 }
    14364 
    14365 VkResult vmaCreateBuffer(
    14366  VmaAllocator allocator,
    14367  const VkBufferCreateInfo* pBufferCreateInfo,
    14368  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14369  VkBuffer* pBuffer,
    14370  VmaAllocation* pAllocation,
    14371  VmaAllocationInfo* pAllocationInfo)
    14372 {
    14373  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    14374 
    14375  if(pBufferCreateInfo->size == 0)
    14376  {
    14377  return VK_ERROR_VALIDATION_FAILED_EXT;
    14378  }
    14379 
    14380  VMA_DEBUG_LOG("vmaCreateBuffer");
    14381 
    14382  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14383 
    14384  *pBuffer = VK_NULL_HANDLE;
    14385  *pAllocation = VK_NULL_HANDLE;
    14386 
    14387  // 1. Create VkBuffer.
    14388  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    14389  allocator->m_hDevice,
    14390  pBufferCreateInfo,
    14391  allocator->GetAllocationCallbacks(),
    14392  pBuffer);
    14393  if(res >= 0)
    14394  {
    14395  // 2. vkGetBufferMemoryRequirements.
    14396  VkMemoryRequirements vkMemReq = {};
    14397  bool requiresDedicatedAllocation = false;
    14398  bool prefersDedicatedAllocation = false;
    14399  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    14400  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14401 
    14402  // Make sure alignment requirements for specific buffer usages reported
    14403  // in Physical Device Properties are included in alignment reported by memory requirements.
    14404  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    14405  {
    14406  VMA_ASSERT(vkMemReq.alignment %
    14407  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    14408  }
    14409  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    14410  {
    14411  VMA_ASSERT(vkMemReq.alignment %
    14412  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    14413  }
    14414  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    14415  {
    14416  VMA_ASSERT(vkMemReq.alignment %
    14417  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    14418  }
    14419 
    14420  // 3. Allocate memory using allocator.
    14421  res = allocator->AllocateMemory(
    14422  vkMemReq,
    14423  requiresDedicatedAllocation,
    14424  prefersDedicatedAllocation,
    14425  *pBuffer, // dedicatedBuffer
    14426  VK_NULL_HANDLE, // dedicatedImage
    14427  *pAllocationCreateInfo,
    14428  VMA_SUBALLOCATION_TYPE_BUFFER,
    14429  pAllocation);
    14430 
    14431 #if VMA_RECORDING_ENABLED
    14432  if(allocator->GetRecorder() != VMA_NULL)
    14433  {
    14434  allocator->GetRecorder()->RecordCreateBuffer(
    14435  allocator->GetCurrentFrameIndex(),
    14436  *pBufferCreateInfo,
    14437  *pAllocationCreateInfo,
    14438  *pAllocation);
    14439  }
    14440 #endif
    14441 
    14442  if(res >= 0)
    14443  {
    14444  // 3. Bind buffer with memory.
    14445  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    14446  if(res >= 0)
    14447  {
    14448  // All steps succeeded.
    14449  #if VMA_STATS_STRING_ENABLED
    14450  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    14451  #endif
    14452  if(pAllocationInfo != VMA_NULL)
    14453  {
    14454  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14455  }
    14456 
    14457  return VK_SUCCESS;
    14458  }
    14459  allocator->FreeMemory(*pAllocation);
    14460  *pAllocation = VK_NULL_HANDLE;
    14461  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14462  *pBuffer = VK_NULL_HANDLE;
    14463  return res;
    14464  }
    14465  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    14466  *pBuffer = VK_NULL_HANDLE;
    14467  return res;
    14468  }
    14469  return res;
    14470 }
    14471 
    14472 void vmaDestroyBuffer(
    14473  VmaAllocator allocator,
    14474  VkBuffer buffer,
    14475  VmaAllocation allocation)
    14476 {
    14477  VMA_ASSERT(allocator);
    14478 
    14479  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14480  {
    14481  return;
    14482  }
    14483 
    14484  VMA_DEBUG_LOG("vmaDestroyBuffer");
    14485 
    14486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14487 
    14488 #if VMA_RECORDING_ENABLED
    14489  if(allocator->GetRecorder() != VMA_NULL)
    14490  {
    14491  allocator->GetRecorder()->RecordDestroyBuffer(
    14492  allocator->GetCurrentFrameIndex(),
    14493  allocation);
    14494  }
    14495 #endif
    14496 
    14497  if(buffer != VK_NULL_HANDLE)
    14498  {
    14499  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    14500  }
    14501 
    14502  if(allocation != VK_NULL_HANDLE)
    14503  {
    14504  allocator->FreeMemory(allocation);
    14505  }
    14506 }
    14507 
    14508 VkResult vmaCreateImage(
    14509  VmaAllocator allocator,
    14510  const VkImageCreateInfo* pImageCreateInfo,
    14511  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    14512  VkImage* pImage,
    14513  VmaAllocation* pAllocation,
    14514  VmaAllocationInfo* pAllocationInfo)
    14515 {
    14516  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    14517 
    14518  if(pImageCreateInfo->extent.width == 0 ||
    14519  pImageCreateInfo->extent.height == 0 ||
    14520  pImageCreateInfo->extent.depth == 0 ||
    14521  pImageCreateInfo->mipLevels == 0 ||
    14522  pImageCreateInfo->arrayLayers == 0)
    14523  {
    14524  return VK_ERROR_VALIDATION_FAILED_EXT;
    14525  }
    14526 
    14527  VMA_DEBUG_LOG("vmaCreateImage");
    14528 
    14529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14530 
    14531  *pImage = VK_NULL_HANDLE;
    14532  *pAllocation = VK_NULL_HANDLE;
    14533 
    14534  // 1. Create VkImage.
    14535  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    14536  allocator->m_hDevice,
    14537  pImageCreateInfo,
    14538  allocator->GetAllocationCallbacks(),
    14539  pImage);
    14540  if(res >= 0)
    14541  {
    14542  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    14543  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    14544  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    14545 
    14546  // 2. Allocate memory using allocator.
    14547  VkMemoryRequirements vkMemReq = {};
    14548  bool requiresDedicatedAllocation = false;
    14549  bool prefersDedicatedAllocation = false;
    14550  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    14551  requiresDedicatedAllocation, prefersDedicatedAllocation);
    14552 
    14553  res = allocator->AllocateMemory(
    14554  vkMemReq,
    14555  requiresDedicatedAllocation,
    14556  prefersDedicatedAllocation,
    14557  VK_NULL_HANDLE, // dedicatedBuffer
    14558  *pImage, // dedicatedImage
    14559  *pAllocationCreateInfo,
    14560  suballocType,
    14561  pAllocation);
    14562 
    14563 #if VMA_RECORDING_ENABLED
    14564  if(allocator->GetRecorder() != VMA_NULL)
    14565  {
    14566  allocator->GetRecorder()->RecordCreateImage(
    14567  allocator->GetCurrentFrameIndex(),
    14568  *pImageCreateInfo,
    14569  *pAllocationCreateInfo,
    14570  *pAllocation);
    14571  }
    14572 #endif
    14573 
    14574  if(res >= 0)
    14575  {
    14576  // 3. Bind image with memory.
    14577  res = allocator->BindImageMemory(*pAllocation, *pImage);
    14578  if(res >= 0)
    14579  {
    14580  // All steps succeeded.
    14581  #if VMA_STATS_STRING_ENABLED
    14582  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    14583  #endif
    14584  if(pAllocationInfo != VMA_NULL)
    14585  {
    14586  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    14587  }
    14588 
    14589  return VK_SUCCESS;
    14590  }
    14591  allocator->FreeMemory(*pAllocation);
    14592  *pAllocation = VK_NULL_HANDLE;
    14593  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14594  *pImage = VK_NULL_HANDLE;
    14595  return res;
    14596  }
    14597  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    14598  *pImage = VK_NULL_HANDLE;
    14599  return res;
    14600  }
    14601  return res;
    14602 }
    14603 
    14604 void vmaDestroyImage(
    14605  VmaAllocator allocator,
    14606  VkImage image,
    14607  VmaAllocation allocation)
    14608 {
    14609  VMA_ASSERT(allocator);
    14610 
    14611  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    14612  {
    14613  return;
    14614  }
    14615 
    14616  VMA_DEBUG_LOG("vmaDestroyImage");
    14617 
    14618  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    14619 
    14620 #if VMA_RECORDING_ENABLED
    14621  if(allocator->GetRecorder() != VMA_NULL)
    14622  {
    14623  allocator->GetRecorder()->RecordDestroyImage(
    14624  allocator->GetCurrentFrameIndex(),
    14625  allocation);
    14626  }
    14627 #endif
    14628 
    14629  if(image != VK_NULL_HANDLE)
    14630  {
    14631  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    14632  }
    14633  if(allocation != VK_NULL_HANDLE)
    14634  {
    14635  allocator->FreeMemory(allocation);
    14636  }
    14637 }
    14638 
    14639 #endif // #ifdef VMA_IMPLEMENTATION
    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
    Definition: vk_mem_alloc.h:1586
    +
    Set this flag if the allocation should have its own memory block.
    Definition: vk_mem_alloc.h:1888
    void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
    Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
    VkPhysicalDevice physicalDevice
    Vulkan physical device.
    Definition: vk_mem_alloc.h:1643
    VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
    Compacts memory by moving allocations.
    void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Invalidates memory of given allocation.
    Represents single memory allocation.
    Definition: vk_mem_alloc.h:1617
    -
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2212
    +
    size_t blockCount
    Number of VkDeviceMemory blocks allocated for this pool.
    Definition: vk_mem_alloc.h:2213
    PFN_vkCreateBuffer vkCreateBuffer
    Definition: vk_mem_alloc.h:1598
    void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
    struct VmaStats VmaStats
    General statistics from current state of Allocator.
    -
    Definition: vk_mem_alloc.h:1844
    -
    Definition: vk_mem_alloc.h:1947
    +
    Definition: vk_mem_alloc.h:1845
    +
    Definition: vk_mem_alloc.h:1948
    PFN_vkMapMemory vkMapMemory
    Definition: vk_mem_alloc.h:1590
    -
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2312
    +
    VkDeviceMemory deviceMemory
    Handle to Vulkan memory object.
    Definition: vk_mem_alloc.h:2313
    VmaAllocatorCreateFlags flags
    Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1640
    -
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2582
    -
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2101
    +
    uint32_t maxAllocationsToMove
    Maximum number of allocations that can be moved to different place.
    Definition: vk_mem_alloc.h:2583
    +
    Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
    Definition: vk_mem_alloc.h:2102
    #define VMA_RECORDING_ENABLED
    Definition: vk_mem_alloc.h:1487
    void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
    Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
    -
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2193
    -
    Definition: vk_mem_alloc.h:1924
    +
    VkDeviceSize size
    Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    Definition: vk_mem_alloc.h:2194
    +
    Definition: vk_mem_alloc.h:1925
    VkFlags VmaAllocatorCreateFlags
    Definition: vk_mem_alloc.h:1579
    -
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2000
    -
    Definition: vk_mem_alloc.h:1871
    +
    VkMemoryPropertyFlags preferredFlags
    Flags that preferably should be set in a memory type chosen for an allocation.
    Definition: vk_mem_alloc.h:2001
    +
    Definition: vk_mem_alloc.h:1872
    const VkAllocationCallbacks * pAllocationCallbacks
    Custom CPU memory allocation callbacks. Optional.
    Definition: vk_mem_alloc.h:1652
    -
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2129
    +
    Enables alternative, buddy allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2130
    void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
    Retrieves statistics from current state of the Allocator.
    -
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1705
    +
    const VmaVulkanFunctions * pVulkanFunctions
    Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
    Definition: vk_mem_alloc.h:1706
    Description of a Allocator to be created.
    Definition: vk_mem_alloc.h:1637
    void vmaDestroyAllocator(VmaAllocator allocator)
    Destroys allocator object.
    -
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1875
    +
    VmaAllocationCreateFlagBits
    Flags to be passed as VmaAllocationCreateInfo::flags.
    Definition: vk_mem_alloc.h:1876
    void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
    Returns current information about specified allocation and atomically marks it as used in current fra...
    -
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1777
    +
    VkDeviceSize allocationSizeMax
    Definition: vk_mem_alloc.h:1778
    PFN_vkBindImageMemory vkBindImageMemory
    Definition: vk_mem_alloc.h:1595
    -
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1776
    -
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2586
    +
    VkDeviceSize unusedBytes
    Total number of bytes occupied by unused ranges.
    Definition: vk_mem_alloc.h:1777
    +
    Statistics returned by function vmaDefragment().
    Definition: vk_mem_alloc.h:2587
    void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
    Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:1669
    -
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1786
    -
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2594
    -
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1984
    -
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2577
    +
    VmaStatInfo total
    Definition: vk_mem_alloc.h:1787
    +
    uint32_t deviceMemoryBlocksFreed
    Number of empty VkDeviceMemory objects that have been released to the system.
    Definition: vk_mem_alloc.h:2595
    +
    VmaAllocationCreateFlags flags
    Use VmaAllocationCreateFlagBits enum.
    Definition: vk_mem_alloc.h:1985
    +
    VkDeviceSize maxBytesToMove
    Maximum total numbers of bytes that can be copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2578
    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
    Definition: vk_mem_alloc.h:1596
    void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called after successful vkAllocateMemory.
    Definition: vk_mem_alloc.h:1521
    Represents main object of this library initialized.
    VkDevice device
    Vulkan device.
    Definition: vk_mem_alloc.h:1646
    VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
    Binds buffer to allocation.
    -
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2143
    -
    Definition: vk_mem_alloc.h:2137
    -
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1712
    -
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2322
    +
    Describes parameter of created VmaPool.
    Definition: vk_mem_alloc.h:2144
    +
    Definition: vk_mem_alloc.h:2138
    +
    const VmaRecordSettings * pRecordSettings
    Parameters for recording of VMA calls. Can be null.
    Definition: vk_mem_alloc.h:1713
    +
    VkDeviceSize size
    Size of this allocation, in bytes.
    Definition: vk_mem_alloc.h:2323
    void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
    Given Memory Type Index, returns Property Flags of this memory type.
    PFN_vkUnmapMemory vkUnmapMemory
    Definition: vk_mem_alloc.h:1591
    Enables flush after recording every function call.
    Definition: vk_mem_alloc.h:1615
    -
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2021
    -
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2163
    -
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2199
    +
    void * pUserData
    Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
    Definition: vk_mem_alloc.h:2022
    +
    size_t minBlockCount
    Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    Definition: vk_mem_alloc.h:2164
    +
    size_t allocationCount
    Number of VmaAllocation objects created from this pool that were not destroyed or lost...
    Definition: vk_mem_alloc.h:2200
    struct VmaVulkanFunctions VmaVulkanFunctions
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1577
    -
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2146
    +
    uint32_t memoryTypeIndex
    Vulkan memory type index to allocate this pool from.
    Definition: vk_mem_alloc.h:2147
    VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
    -
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1822
    +
    VmaMemoryUsage
    Definition: vk_mem_alloc.h:1823
    struct VmaAllocationInfo VmaAllocationInfo
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    Flushes memory of given allocation.
    -
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2572
    +
    Optional configuration parameters to be passed to function vmaDefragment().
    Definition: vk_mem_alloc.h:2573
    struct VmaPoolCreateInfo VmaPoolCreateInfo
    Describes parameter of created VmaPool.
    void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
    Destroys VmaPool object and frees Vulkan device memory.
    -
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2590
    -
    Definition: vk_mem_alloc.h:1861
    -
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2008
    +
    VkDeviceSize bytesFreed
    Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
    Definition: vk_mem_alloc.h:2591
    +
    Definition: vk_mem_alloc.h:1862
    +
    uint32_t memoryTypeBits
    Bitmask containing one bit set for every memory type acceptable for this allocation.
    Definition: vk_mem_alloc.h:2009
    PFN_vkBindBufferMemory vkBindBufferMemory
    Definition: vk_mem_alloc.h:1594
    Represents custom memory pool.
    void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
    Retrieves statistics of existing VmaPool object.
    struct VmaDefragmentationInfo VmaDefragmentationInfo
    Optional configuration parameters to be passed to function vmaDefragment().
    -
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1782
    +
    General statistics from current state of Allocator.
    Definition: vk_mem_alloc.h:1783
    void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    Callback function called before vkFreeMemory.
    Definition: vk_mem_alloc.h:1527
    void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
    Sets pUserData in given allocation to new value.
    - +
    VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
    Allocates Vulkan device memory and creates VmaPool object.
    VmaAllocatorCreateFlagBits
    Flags for created VmaAllocator.
    Definition: vk_mem_alloc.h:1548
    VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
    Binds image to allocation.
    struct VmaStatInfo VmaStatInfo
    Calculated statistics of memory usage in entire allocator.
    VkFlags VmaRecordFlags
    Definition: vk_mem_alloc.h:1619
    Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
    Definition: vk_mem_alloc.h:1553
    -
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2592
    +
    uint32_t allocationsMoved
    Number of allocations that have been moved to different places.
    Definition: vk_mem_alloc.h:2593
    void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
    Creates new allocation that is in lost state from the beginning.
    -
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1995
    -
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2209
    +
    VkMemoryPropertyFlags requiredFlags
    Flags that must be set in a Memory Type chosen for an allocation.
    Definition: vk_mem_alloc.h:1996
    +
    VkDeviceSize unusedRangeSizeMax
    Size of the largest continuous free memory region available for new allocation.
    Definition: vk_mem_alloc.h:2210
    void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
    Builds and returns statistics as string in JSON format.
    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
    Definition: vk_mem_alloc.h:1587
    -
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1765
    -
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2158
    +
    Calculated statistics of memory usage in entire allocator.
    Definition: vk_mem_alloc.h:1766
    +
    VkDeviceSize blockSize
    Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    Definition: vk_mem_alloc.h:2159
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    Definition: vk_mem_alloc.h:1540
    -
    Definition: vk_mem_alloc.h:2133
    +
    Definition: vk_mem_alloc.h:2134
    VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1931
    -
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1778
    +
    Definition: vk_mem_alloc.h:1932
    +
    VkDeviceSize unusedRangeSizeMin
    Definition: vk_mem_alloc.h:1779
    PFN_vmaFreeDeviceMemoryFunction pfnFree
    Optional, can be null.
    Definition: vk_mem_alloc.h:1544
    -
    Definition: vk_mem_alloc.h:1958
    -
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2149
    -
    Definition: vk_mem_alloc.h:1870
    +
    Definition: vk_mem_alloc.h:1959
    +
    VmaPoolCreateFlags flags
    Use combination of VmaPoolCreateFlagBits.
    Definition: vk_mem_alloc.h:2150
    +
    Definition: vk_mem_alloc.h:1871
    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
    Definition: vk_mem_alloc.h:1593
    struct VmaPoolStats VmaPoolStats
    Describes parameter of existing VmaPool.
    VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaCreateBuffer().
    -
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1990
    -
    Definition: vk_mem_alloc.h:1981
    +
    VmaMemoryUsage usage
    Intended usage of memory.
    Definition: vk_mem_alloc.h:1991
    +
    Definition: vk_mem_alloc.h:1982
    VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
    -
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1768
    +
    uint32_t blockCount
    Number of VkDeviceMemory Vulkan memory blocks allocated.
    Definition: vk_mem_alloc.h:1769
    PFN_vkFreeMemory vkFreeMemory
    Definition: vk_mem_alloc.h:1589
    -
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2171
    +
    size_t maxBlockCount
    Maximum number of blocks that can be allocated in this pool. Optional.
    Definition: vk_mem_alloc.h:2172
    const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
    Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    Definition: vk_mem_alloc.h:1655
    -
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2202
    -
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1979
    -
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2014
    +
    size_t unusedRangeCount
    Number of continuous memory ranges in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2203
    +
    VkFlags VmaAllocationCreateFlags
    Definition: vk_mem_alloc.h:1980
    +
    VmaPool pool
    Pool that this allocation should be created in.
    Definition: vk_mem_alloc.h:2015
    void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
    -
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1693
    -
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1784
    -
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1911
    -
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1777
    +
    const VkDeviceSize * pHeapSizeLimit
    Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
    Definition: vk_mem_alloc.h:1694
    +
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
    Definition: vk_mem_alloc.h:1785
    +
    Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
    Definition: vk_mem_alloc.h:1912
    +
    VkDeviceSize allocationSizeMin
    Definition: vk_mem_alloc.h:1778
    VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
    Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
    PFN_vkCreateImage vkCreateImage
    Definition: vk_mem_alloc.h:1600
    VmaRecordFlags flags
    Flags for recording. Use VmaRecordFlagBits enum.
    Definition: vk_mem_alloc.h:1625
    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
    Optional, can be null.
    Definition: vk_mem_alloc.h:1542
    PFN_vkDestroyBuffer vkDestroyBuffer
    Definition: vk_mem_alloc.h:1599
    VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
    Maps memory represented by given allocation and returns pointer to it.
    -
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2185
    +
    uint32_t frameInUseCount
    Maximum number of additional frames that are in use at the same time as current frame.
    Definition: vk_mem_alloc.h:2186
    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
    Definition: vk_mem_alloc.h:1592
    -
    Definition: vk_mem_alloc.h:1942
    +
    Definition: vk_mem_alloc.h:1943
    VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    Function similar to vmaAllocateMemoryForBuffer().
    struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
    Description of a Allocator to be created.
    const char * pFilePath
    Path to the file that should be written by the recording.
    Definition: vk_mem_alloc.h:1633
    -
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2336
    +
    void * pUserData
    Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
    Definition: vk_mem_alloc.h:2337
    VkDeviceSize preferredLargeHeapBlockSize
    Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
    Definition: vk_mem_alloc.h:1649
    -
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1777
    -
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1774
    +
    VkDeviceSize allocationSizeAvg
    Definition: vk_mem_alloc.h:1778
    +
    VkDeviceSize usedBytes
    Total number of bytes occupied by all allocations.
    Definition: vk_mem_alloc.h:1775
    struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
    Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
    VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    Checks magic number in margins around all allocations in given memory types (in both default and cust...
    -
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2190
    +
    Describes parameter of existing VmaPool.
    Definition: vk_mem_alloc.h:2191
    VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    Checks magic number in margins around all allocations in given memory pool in search for corruptions...
    -
    Definition: vk_mem_alloc.h:1951
    -
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2317
    -
    Definition: vk_mem_alloc.h:1965
    -
    Definition: vk_mem_alloc.h:1977
    -
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2588
    +
    Definition: vk_mem_alloc.h:1952
    +
    VkDeviceSize offset
    Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
    Definition: vk_mem_alloc.h:2318
    +
    Definition: vk_mem_alloc.h:1966
    +
    Definition: vk_mem_alloc.h:1978
    +
    VkDeviceSize bytesMoved
    Total number of bytes that have been copied while moving allocations to different places...
    Definition: vk_mem_alloc.h:2589
    Pointers to some Vulkan functions - a subset used by the library.
    Definition: vk_mem_alloc.h:1585
    VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
    Creates Allocator object.
    -
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1772
    -
    Definition: vk_mem_alloc.h:1827
    -
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2139
    +
    uint32_t unusedRangeCount
    Number of free ranges of memory between allocations.
    Definition: vk_mem_alloc.h:1773
    +
    Definition: vk_mem_alloc.h:1828
    +
    VkFlags VmaPoolCreateFlags
    Definition: vk_mem_alloc.h:2140
    void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    Definition: vk_mem_alloc.h:1622
    -
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1770
    +
    uint32_t allocationCount
    Number of VmaAllocation allocation objects allocated.
    Definition: vk_mem_alloc.h:1771
    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
    Definition: vk_mem_alloc.h:1597
    PFN_vkDestroyImage vkDestroyImage
    Definition: vk_mem_alloc.h:1601
    -
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1898
    -
    Definition: vk_mem_alloc.h:1972
    -
    Definition: vk_mem_alloc.h:1854
    -
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2331
    +
    Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
    Definition: vk_mem_alloc.h:1899
    +
    Definition: vk_mem_alloc.h:1973
    +
    Definition: vk_mem_alloc.h:1855
    +
    void * pMappedData
    Pointer to the beginning of this allocation as mapped data.
    Definition: vk_mem_alloc.h:2332
    void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
    Destroys Vulkan image and frees allocated memory.
    Enables usage of VK_KHR_dedicated_allocation extension.
    Definition: vk_mem_alloc.h:1575
    struct VmaDefragmentationStats VmaDefragmentationStats
    Statistics returned by function vmaDefragment().
    PFN_vkAllocateMemory vkAllocateMemory
    Definition: vk_mem_alloc.h:1588
    -
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2118
    +
    Enables alternative, linear allocation algorithm in this pool.
    Definition: vk_mem_alloc.h:2119
    VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
    Tries to resize an allocation in place, if there is enough free memory after it.
    -
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2298
    +
    Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
    Definition: vk_mem_alloc.h:2299
    VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    General purpose memory allocation.
    void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
    Sets index of the current frame.
    struct VmaAllocationCreateInfo VmaAllocationCreateInfo
    VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
    -
    Definition: vk_mem_alloc.h:1962
    -
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2083
    -
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1778
    +
    Definition: vk_mem_alloc.h:1963
    +
    VmaPoolCreateFlagBits
    Flags to be passed as VmaPoolCreateInfo::flags.
    Definition: vk_mem_alloc.h:2084
    +
    VkDeviceSize unusedRangeSizeAvg
    Definition: vk_mem_alloc.h:1779
    VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
    Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
    - +
    VmaRecordFlagBits
    Flags to be used in VmaRecordSettings::flags.
    Definition: vk_mem_alloc.h:1609
    -
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1785
    +
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
    Definition: vk_mem_alloc.h:1786
    void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
    Destroys Vulkan buffer and frees allocated memory.
    -
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2196
    -
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1778
    +
    VkDeviceSize unusedSize
    Total number of bytes in the pool not used by any VmaAllocation.
    Definition: vk_mem_alloc.h:2197
    +
    VkDeviceSize unusedRangeSizeMax
    Definition: vk_mem_alloc.h:1779
    struct VmaRecordSettings VmaRecordSettings
    Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
    -
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2303
    +
    uint32_t memoryType
    Memory type index that this allocation was allocated from.
    Definition: vk_mem_alloc.h:2304